summaryrefslogtreecommitdiffstats
path: root/security/sandbox/chromium/base
diff options
context:
space:
mode:
Diffstat (limited to 'security/sandbox/chromium/base')
-rw-r--r--security/sandbox/chromium/base/at_exit.cc114
-rw-r--r--security/sandbox/chromium/base/at_exit.h87
-rw-r--r--security/sandbox/chromium/base/atomic_ref_count.h69
-rw-r--r--security/sandbox/chromium/base/atomic_sequence_num.h33
-rw-r--r--security/sandbox/chromium/base/atomicops.h150
-rw-r--r--security/sandbox/chromium/base/atomicops_internals_portable.h219
-rw-r--r--security/sandbox/chromium/base/atomicops_internals_x86_msvc.h179
-rw-r--r--security/sandbox/chromium/base/base_export.h29
-rw-r--r--security/sandbox/chromium/base/base_paths.h55
-rw-r--r--security/sandbox/chromium/base/base_paths_win.h53
-rw-r--r--security/sandbox/chromium/base/base_switches.cc149
-rw-r--r--security/sandbox/chromium/base/base_switches.h60
-rw-r--r--security/sandbox/chromium/base/bind.h470
-rw-r--r--security/sandbox/chromium/base/bind_helpers.h69
-rw-r--r--security/sandbox/chromium/base/bind_internal.h1050
-rw-r--r--security/sandbox/chromium/base/bit_cast.h77
-rw-r--r--security/sandbox/chromium/base/bits.h209
-rw-r--r--security/sandbox/chromium/base/callback.h149
-rw-r--r--security/sandbox/chromium/base/callback_forward.h28
-rw-r--r--security/sandbox/chromium/base/callback_internal.cc101
-rw-r--r--security/sandbox/chromium/base/callback_internal.h194
-rw-r--r--security/sandbox/chromium/base/compiler_specific.h298
-rw-r--r--security/sandbox/chromium/base/containers/adapters.h55
-rw-r--r--security/sandbox/chromium/base/containers/buffer_iterator.h145
-rw-r--r--security/sandbox/chromium/base/containers/checked_iterators.h205
-rw-r--r--security/sandbox/chromium/base/containers/circular_deque.h1112
-rw-r--r--security/sandbox/chromium/base/containers/span.h530
-rw-r--r--security/sandbox/chromium/base/containers/stack.h23
-rw-r--r--security/sandbox/chromium/base/containers/util.h21
-rw-r--r--security/sandbox/chromium/base/containers/vector_buffer.h188
-rw-r--r--security/sandbox/chromium/base/cpu.cc312
-rw-r--r--security/sandbox/chromium/base/cpu.h104
-rw-r--r--security/sandbox/chromium/base/debug/alias.cc16
-rw-r--r--security/sandbox/chromium/base/debug/alias.h44
-rw-r--r--security/sandbox/chromium/base/debug/crash_logging.h104
-rw-r--r--security/sandbox/chromium/base/debug/debugger.h50
-rw-r--r--security/sandbox/chromium/base/debug/leak_annotations.h46
-rw-r--r--security/sandbox/chromium/base/debug/profiler.cc180
-rw-r--r--security/sandbox/chromium/base/debug/profiler.h76
-rw-r--r--security/sandbox/chromium/base/environment.cc123
-rw-r--r--security/sandbox/chromium/base/environment.h61
-rw-r--r--security/sandbox/chromium/base/file_descriptor_posix.h61
-rw-r--r--security/sandbox/chromium/base/files/file_path.h484
-rw-r--r--security/sandbox/chromium/base/files/file_path_constants.cc25
-rw-r--r--security/sandbox/chromium/base/format_macros.h97
-rw-r--r--security/sandbox/chromium/base/guid.h46
-rw-r--r--security/sandbox/chromium/base/hash/hash.cc167
-rw-r--r--security/sandbox/chromium/base/hash/hash.h86
-rw-r--r--security/sandbox/chromium/base/immediate_crash.h168
-rw-r--r--security/sandbox/chromium/base/lazy_instance.h210
-rw-r--r--security/sandbox/chromium/base/lazy_instance_helpers.cc64
-rw-r--r--security/sandbox/chromium/base/lazy_instance_helpers.h101
-rw-r--r--security/sandbox/chromium/base/location.cc96
-rw-r--r--security/sandbox/chromium/base/location.h142
-rw-r--r--security/sandbox/chromium/base/logging.h1077
-rw-r--r--security/sandbox/chromium/base/macros.h48
-rw-r--r--security/sandbox/chromium/base/memory/aligned_memory.h60
-rw-r--r--security/sandbox/chromium/base/memory/free_deleter.h25
-rw-r--r--security/sandbox/chromium/base/memory/platform_shared_memory_region.cc62
-rw-r--r--security/sandbox/chromium/base/memory/platform_shared_memory_region.h301
-rw-r--r--security/sandbox/chromium/base/memory/platform_shared_memory_region_win.cc343
-rw-r--r--security/sandbox/chromium/base/memory/ptr_util.h23
-rw-r--r--security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h52
-rw-r--r--security/sandbox/chromium/base/memory/ref_counted.cc105
-rw-r--r--security/sandbox/chromium/base/memory/ref_counted.h463
-rw-r--r--security/sandbox/chromium/base/memory/scoped_refptr.h375
-rw-r--r--security/sandbox/chromium/base/memory/shared_memory_mapping.cc115
-rw-r--r--security/sandbox/chromium/base/memory/shared_memory_mapping.h252
-rw-r--r--security/sandbox/chromium/base/memory/singleton.h279
-rw-r--r--security/sandbox/chromium/base/memory/unsafe_shared_memory_region.cc80
-rw-r--r--security/sandbox/chromium/base/memory/unsafe_shared_memory_region.h127
-rw-r--r--security/sandbox/chromium/base/memory/weak_ptr.h395
-rw-r--r--security/sandbox/chromium/base/no_destructor.h98
-rw-r--r--security/sandbox/chromium/base/numerics/checked_math.h393
-rw-r--r--security/sandbox/chromium/base/numerics/checked_math_impl.h567
-rw-r--r--security/sandbox/chromium/base/numerics/clamped_math.h264
-rw-r--r--security/sandbox/chromium/base/numerics/clamped_math_impl.h341
-rw-r--r--security/sandbox/chromium/base/numerics/safe_conversions.h358
-rw-r--r--security/sandbox/chromium/base/numerics/safe_conversions_arm_impl.h51
-rw-r--r--security/sandbox/chromium/base/numerics/safe_conversions_impl.h851
-rw-r--r--security/sandbox/chromium/base/numerics/safe_math.h12
-rw-r--r--security/sandbox/chromium/base/numerics/safe_math_arm_impl.h122
-rw-r--r--security/sandbox/chromium/base/numerics/safe_math_clang_gcc_impl.h157
-rw-r--r--security/sandbox/chromium/base/numerics/safe_math_shared_impl.h240
-rw-r--r--security/sandbox/chromium/base/optional.h937
-rw-r--r--security/sandbox/chromium/base/os_compat_android.h21
-rw-r--r--security/sandbox/chromium/base/path_service.h94
-rw-r--r--security/sandbox/chromium/base/posix/can_lower_nice_to.cc60
-rw-r--r--security/sandbox/chromium/base/posix/can_lower_nice_to.h19
-rw-r--r--security/sandbox/chromium/base/posix/eintr_wrapper.h68
-rw-r--r--security/sandbox/chromium/base/posix/safe_strerror.cc128
-rw-r--r--security/sandbox/chromium/base/posix/safe_strerror.h44
-rw-r--r--security/sandbox/chromium/base/process/environment_internal.cc128
-rw-r--r--security/sandbox/chromium/base/process/environment_internal.h52
-rw-r--r--security/sandbox/chromium/base/process/kill.h162
-rw-r--r--security/sandbox/chromium/base/process/memory.h89
-rw-r--r--security/sandbox/chromium/base/process/process.h223
-rw-r--r--security/sandbox/chromium/base/process/process_handle.h142
-rw-r--r--security/sandbox/chromium/base/process/process_handle_win.cc52
-rw-r--r--security/sandbox/chromium/base/rand_util.h78
-rw-r--r--security/sandbox/chromium/base/rand_util_win.cc38
-rw-r--r--security/sandbox/chromium/base/scoped_clear_last_error.h58
-rw-r--r--security/sandbox/chromium/base/scoped_clear_last_error_win.cc22
-rw-r--r--security/sandbox/chromium/base/sequence_checker.h143
-rw-r--r--security/sandbox/chromium/base/sequence_checker_impl.h63
-rw-r--r--security/sandbox/chromium/base/sequence_token.h115
-rw-r--r--security/sandbox/chromium/base/sequenced_task_runner.h201
-rw-r--r--security/sandbox/chromium/base/sequenced_task_runner_helpers.h42
-rw-r--r--security/sandbox/chromium/base/single_thread_task_runner.h36
-rw-r--r--security/sandbox/chromium/base/stl_util.h681
-rw-r--r--security/sandbox/chromium/base/strings/char_traits.h92
-rw-r--r--security/sandbox/chromium/base/strings/nullable_string16.cc33
-rw-r--r--security/sandbox/chromium/base/strings/nullable_string16.h55
-rw-r--r--security/sandbox/chromium/base/strings/safe_sprintf.cc682
-rw-r--r--security/sandbox/chromium/base/strings/safe_sprintf.h246
-rw-r--r--security/sandbox/chromium/base/strings/safe_sprintf_unittest.cc765
-rw-r--r--security/sandbox/chromium/base/strings/string16.cc87
-rw-r--r--security/sandbox/chromium/base/strings/string16.h229
-rw-r--r--security/sandbox/chromium/base/strings/string_number_conversions.cc545
-rw-r--r--security/sandbox/chromium/base/strings/string_number_conversions.h157
-rw-r--r--security/sandbox/chromium/base/strings/string_piece.cc426
-rw-r--r--security/sandbox/chromium/base/strings/string_piece.h513
-rw-r--r--security/sandbox/chromium/base/strings/string_piece_forward.h24
-rw-r--r--security/sandbox/chromium/base/strings/string_split.cc254
-rw-r--r--security/sandbox/chromium/base/strings/string_split.h169
-rw-r--r--security/sandbox/chromium/base/strings/string_util.cc1057
-rw-r--r--security/sandbox/chromium/base/strings/string_util.h549
-rw-r--r--security/sandbox/chromium/base/strings/string_util_constants.cc54
-rw-r--r--security/sandbox/chromium/base/strings/string_util_posix.h37
-rw-r--r--security/sandbox/chromium/base/strings/string_util_win.h44
-rw-r--r--security/sandbox/chromium/base/strings/stringprintf.cc225
-rw-r--r--security/sandbox/chromium/base/strings/stringprintf.h74
-rw-r--r--security/sandbox/chromium/base/strings/utf_string_conversion_utils.cc155
-rw-r--r--security/sandbox/chromium/base/strings/utf_string_conversion_utils.h103
-rw-r--r--security/sandbox/chromium/base/strings/utf_string_conversions.cc342
-rw-r--r--security/sandbox/chromium/base/strings/utf_string_conversions.h54
-rw-r--r--security/sandbox/chromium/base/synchronization/atomic_flag.h50
-rw-r--r--security/sandbox/chromium/base/synchronization/condition_variable.h135
-rw-r--r--security/sandbox/chromium/base/synchronization/condition_variable_posix.cc149
-rw-r--r--security/sandbox/chromium/base/synchronization/lock.cc38
-rw-r--r--security/sandbox/chromium/base/synchronization/lock.h133
-rw-r--r--security/sandbox/chromium/base/synchronization/lock_impl.h175
-rw-r--r--security/sandbox/chromium/base/synchronization/lock_impl_posix.cc133
-rw-r--r--security/sandbox/chromium/base/synchronization/lock_impl_win.cc40
-rw-r--r--security/sandbox/chromium/base/synchronization/waitable_event.h291
-rw-r--r--security/sandbox/chromium/base/synchronization/waitable_event_posix.cc445
-rw-r--r--security/sandbox/chromium/base/task_runner.h136
-rw-r--r--security/sandbox/chromium/base/template_util.h188
-rw-r--r--security/sandbox/chromium/base/third_party/cityhash/COPYING19
-rw-r--r--security/sandbox/chromium/base/third_party/cityhash/city.cc532
-rw-r--r--security/sandbox/chromium/base/third_party/cityhash/city.h129
-rw-r--r--security/sandbox/chromium/base/third_party/dynamic_annotations/LICENSE28
-rw-r--r--security/sandbox/chromium/base/third_party/dynamic_annotations/dynamic_annotations.h595
-rw-r--r--security/sandbox/chromium/base/third_party/icu/LICENSE76
-rw-r--r--security/sandbox/chromium/base/third_party/icu/icu_utf.cc131
-rw-r--r--security/sandbox/chromium/base/third_party/icu/icu_utf.h442
-rw-r--r--security/sandbox/chromium/base/third_party/superfasthash/LICENSE27
-rw-r--r--security/sandbox/chromium/base/third_party/superfasthash/README.chromium29
-rw-r--r--security/sandbox/chromium/base/third_party/superfasthash/superfasthash.c84
-rw-r--r--security/sandbox/chromium/base/third_party/valgrind/LICENSE39
-rw-r--r--security/sandbox/chromium/base/third_party/valgrind/valgrind.h4792
-rw-r--r--security/sandbox/chromium/base/thread_annotations.h264
-rw-r--r--security/sandbox/chromium/base/threading/platform_thread.cc51
-rw-r--r--security/sandbox/chromium/base/threading/platform_thread.h259
-rw-r--r--security/sandbox/chromium/base/threading/platform_thread_internal_posix.cc39
-rw-r--r--security/sandbox/chromium/base/threading/platform_thread_internal_posix.h62
-rw-r--r--security/sandbox/chromium/base/threading/platform_thread_posix.cc361
-rw-r--r--security/sandbox/chromium/base/threading/platform_thread_win.cc463
-rw-r--r--security/sandbox/chromium/base/threading/platform_thread_win.h23
-rw-r--r--security/sandbox/chromium/base/threading/thread_checker_impl.h74
-rw-r--r--security/sandbox/chromium/base/threading/thread_collision_warner.cc64
-rw-r--r--security/sandbox/chromium/base/threading/thread_collision_warner.h252
-rw-r--r--security/sandbox/chromium/base/threading/thread_id_name_manager.cc147
-rw-r--r--security/sandbox/chromium/base/threading/thread_id_name_manager.h94
-rw-r--r--security/sandbox/chromium/base/threading/thread_local.h136
-rw-r--r--security/sandbox/chromium/base/threading/thread_local_internal.h80
-rw-r--r--security/sandbox/chromium/base/threading/thread_local_storage.cc461
-rw-r--r--security/sandbox/chromium/base/threading/thread_local_storage.h175
-rw-r--r--security/sandbox/chromium/base/threading/thread_local_storage_posix.cc30
-rw-r--r--security/sandbox/chromium/base/threading/thread_local_storage_win.cc107
-rw-r--r--security/sandbox/chromium/base/threading/thread_restrictions.cc258
-rw-r--r--security/sandbox/chromium/base/threading/thread_restrictions.h680
-rw-r--r--security/sandbox/chromium/base/time/time.cc433
-rw-r--r--security/sandbox/chromium/base/time/time.h1077
-rw-r--r--security/sandbox/chromium/base/time/time_exploded_posix.cc287
-rw-r--r--security/sandbox/chromium/base/time/time_now_posix.cc122
-rw-r--r--security/sandbox/chromium/base/time/time_override.h74
-rw-r--r--security/sandbox/chromium/base/time/time_win.cc810
-rw-r--r--security/sandbox/chromium/base/time/time_win_features.cc14
-rw-r--r--security/sandbox/chromium/base/time/time_win_features.h20
-rw-r--r--security/sandbox/chromium/base/token.cc28
-rw-r--r--security/sandbox/chromium/base/token.h72
-rw-r--r--security/sandbox/chromium/base/tuple.h112
-rw-r--r--security/sandbox/chromium/base/unguessable_token.cc39
-rw-r--r--security/sandbox/chromium/base/unguessable_token.h120
-rw-r--r--security/sandbox/chromium/base/version.cc194
-rw-r--r--security/sandbox/chromium/base/version.h77
-rw-r--r--security/sandbox/chromium/base/win/current_module.h17
-rw-r--r--security/sandbox/chromium/base/win/pe_image.cc652
-rw-r--r--security/sandbox/chromium/base/win/pe_image.h308
-rw-r--r--security/sandbox/chromium/base/win/scoped_handle.cc44
-rw-r--r--security/sandbox/chromium/base/win/scoped_handle.h184
-rw-r--r--security/sandbox/chromium/base/win/scoped_handle_verifier.cc238
-rw-r--r--security/sandbox/chromium/base/win/scoped_handle_verifier.h88
-rw-r--r--security/sandbox/chromium/base/win/scoped_process_information.cc107
-rw-r--r--security/sandbox/chromium/base/win/scoped_process_information.h75
-rw-r--r--security/sandbox/chromium/base/win/startup_information.cc59
-rw-r--r--security/sandbox/chromium/base/win/startup_information.h53
-rw-r--r--security/sandbox/chromium/base/win/static_constants.cc13
-rw-r--r--security/sandbox/chromium/base/win/static_constants.h21
-rw-r--r--security/sandbox/chromium/base/win/windows_types.h278
-rw-r--r--security/sandbox/chromium/base/win/windows_version.cc313
-rw-r--r--security/sandbox/chromium/base/win/windows_version.h187
213 files changed, 46319 insertions, 0 deletions
diff --git a/security/sandbox/chromium/base/at_exit.cc b/security/sandbox/chromium/base/at_exit.cc
new file mode 100644
index 0000000000..eb7d26cdc7
--- /dev/null
+++ b/security/sandbox/chromium/base/at_exit.cc
@@ -0,0 +1,114 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+
+#include <stddef.h>
+#include <ostream>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/logging.h"
+
+namespace base {
+
+// Keep a stack of registered AtExitManagers. We always operate on the most
+// recent, and we should never have more than one outside of testing (for a
+// statically linked version of this library). Testing may use the shadow
+// version of the constructor, and if we are building a dynamic library we may
+// end up with multiple AtExitManagers on the same process. We don't protect
+// this for thread-safe access, since it will only be modified in testing.
+static AtExitManager* g_top_manager = nullptr;
+
+static bool g_disable_managers = false;
+
+AtExitManager::AtExitManager() : next_manager_(g_top_manager) {
+// If multiple modules instantiate AtExitManagers they'll end up living in this
+// module... they have to coexist.
+#if !defined(COMPONENT_BUILD)
+ DCHECK(!g_top_manager);
+#endif
+ g_top_manager = this;
+}
+
+AtExitManager::~AtExitManager() {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to ~AtExitManager without an AtExitManager";
+ return;
+ }
+ DCHECK_EQ(this, g_top_manager);
+
+ if (!g_disable_managers)
+ ProcessCallbacksNow();
+ g_top_manager = next_manager_;
+}
+
+// static
+void AtExitManager::RegisterCallback(AtExitCallbackType func, void* param) {
+ DCHECK(func);
+ RegisterTask(base::BindOnce(func, param));
+}
+
+// static
+void AtExitManager::RegisterTask(base::OnceClosure task) {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to RegisterCallback without an AtExitManager";
+ return;
+ }
+
+ AutoLock lock(g_top_manager->lock_);
+#if DCHECK_IS_ON()
+ DCHECK(!g_top_manager->processing_callbacks_);
+#endif
+ g_top_manager->stack_.push(std::move(task));
+}
+
+// static
+void AtExitManager::ProcessCallbacksNow() {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to ProcessCallbacksNow without an AtExitManager";
+ return;
+ }
+
+ // Callbacks may try to add new callbacks, so run them without holding
+ // |lock_|. This is an error and caught by the DCHECK in RegisterTask(), but
+ // handle it gracefully in release builds so we don't deadlock.
+ base::stack<base::OnceClosure> tasks;
+ {
+ AutoLock lock(g_top_manager->lock_);
+ tasks.swap(g_top_manager->stack_);
+#if DCHECK_IS_ON()
+ g_top_manager->processing_callbacks_ = true;
+#endif
+ }
+
+ // Relax the cross-thread access restriction to non-thread-safe RefCount.
+ // It's safe since all other threads should be terminated at this point.
+ ScopedAllowCrossThreadRefCountAccess allow_cross_thread_ref_count_access;
+
+ while (!tasks.empty()) {
+ std::move(tasks.top()).Run();
+ tasks.pop();
+ }
+
+#if DCHECK_IS_ON()
+ AutoLock lock(g_top_manager->lock_);
+ // Expect that all callbacks have been run.
+ DCHECK(g_top_manager->stack_.empty());
+ g_top_manager->processing_callbacks_ = false;
+#endif
+}
+
+void AtExitManager::DisableAllAtExitManagers() {
+ AutoLock lock(g_top_manager->lock_);
+ g_disable_managers = true;
+}
+
+AtExitManager::AtExitManager(bool shadow) : next_manager_(g_top_manager) {
+ DCHECK(shadow || !g_top_manager);
+ g_top_manager = this;
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/at_exit.h b/security/sandbox/chromium/base/at_exit.h
new file mode 100644
index 0000000000..fa652ac0c9
--- /dev/null
+++ b/security/sandbox/chromium/base/at_exit.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AT_EXIT_H_
+#define BASE_AT_EXIT_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/containers/stack.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
+
+namespace base {
+
+// This class provides a facility similar to the CRT atexit(), except that
+// we control when the callbacks are executed. Under Windows for a DLL they
+// happen at a really bad time and under the loader lock. This facility is
+// mostly used by base::Singleton.
+//
+// The usage is simple. Early in the main() or WinMain() scope create an
+// AtExitManager object on the stack:
+// int main(...) {
+// base::AtExitManager exit_manager;
+//
+// }
+// When the exit_manager object goes out of scope, all the registered
+// callbacks and singleton destructors will be called.
+
+class BASE_EXPORT AtExitManager {
+ public:
+ typedef void (*AtExitCallbackType)(void*);
+
+ AtExitManager();
+
+ // The dtor calls all the registered callbacks. Do not try to register more
+ // callbacks after this point.
+ ~AtExitManager();
+
+ // Registers the specified function to be called at exit. The prototype of
+ // the callback function is void func(void*).
+ static void RegisterCallback(AtExitCallbackType func, void* param);
+
+ // Registers the specified task to be called at exit.
+ static void RegisterTask(base::OnceClosure task);
+
+ // Calls the functions registered with RegisterCallback in LIFO order. It
+ // is possible to register new callbacks after calling this function.
+ static void ProcessCallbacksNow();
+
+ // Disable all registered at-exit callbacks. This is used only in a single-
+ // process mode.
+ static void DisableAllAtExitManagers();
+
+ protected:
+ // This constructor will allow this instance of AtExitManager to be created
+ // even if one already exists. This should only be used for testing!
+ // AtExitManagers are kept on a global stack, and it will be removed during
+ // destruction. This allows you to shadow another AtExitManager.
+ explicit AtExitManager(bool shadow);
+
+ private:
+ base::Lock lock_;
+
+ base::stack<base::OnceClosure> stack_ GUARDED_BY(lock_);
+
+#if DCHECK_IS_ON()
+ bool processing_callbacks_ GUARDED_BY(lock_) = false;
+#endif
+
+ // Stack of managers to allow shadowing.
+ AtExitManager* const next_manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(AtExitManager);
+};
+
+#if defined(UNIT_TEST)
+class ShadowingAtExitManager : public AtExitManager {
+ public:
+ ShadowingAtExitManager() : AtExitManager(true) {}
+};
+#endif // defined(UNIT_TEST)
+
+} // namespace base
+
+#endif // BASE_AT_EXIT_H_
diff --git a/security/sandbox/chromium/base/atomic_ref_count.h b/security/sandbox/chromium/base/atomic_ref_count.h
new file mode 100644
index 0000000000..5e48c82380
--- /dev/null
+++ b/security/sandbox/chromium/base/atomic_ref_count.h
@@ -0,0 +1,69 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a low level implementation of atomic semantics for reference
+// counting. Please use base/memory/ref_counted.h directly instead.
+
+#ifndef BASE_ATOMIC_REF_COUNT_H_
+#define BASE_ATOMIC_REF_COUNT_H_
+
+#include <atomic>
+
+namespace base {
+
+class AtomicRefCount {
+ public:
+ constexpr AtomicRefCount() : ref_count_(0) {}
+ explicit constexpr AtomicRefCount(int initial_value)
+ : ref_count_(initial_value) {}
+
+ // Increment a reference count.
+ // Returns the previous value of the count.
+ int Increment() { return Increment(1); }
+
+ // Increment a reference count by "increment", which must exceed 0.
+ // Returns the previous value of the count.
+ int Increment(int increment) {
+ return ref_count_.fetch_add(increment, std::memory_order_relaxed);
+ }
+
+ // Decrement a reference count, and return whether the result is non-zero.
+ // Insert barriers to ensure that state written before the reference count
+ // became zero will be visible to a thread that has just made the count zero.
+ bool Decrement() {
+ // TODO(jbroman): Technically this doesn't need to be an acquire operation
+ // unless the result is 1 (i.e., the ref count did indeed reach zero).
+ // However, there are toolchain issues that make that not work as well at
+ // present (notably TSAN doesn't like it).
+ return ref_count_.fetch_sub(1, std::memory_order_acq_rel) != 1;
+ }
+
+ // Return whether the reference count is one. If the reference count is used
+ // in the conventional way, a reference count of 1 implies that the current
+ // thread owns the reference and no other thread shares it. This call
+ // performs the test for a reference count of one, and performs the memory
+ // barrier needed for the owning thread to act on the object, knowing that it
+ // has exclusive access to the object.
+ bool IsOne() const { return ref_count_.load(std::memory_order_acquire) == 1; }
+
+ // Return whether the reference count is zero. With conventional object
+ // reference counting, the object will be destroyed, so the reference count
+ // should never be zero. Hence this is generally used for a debug check.
+ bool IsZero() const {
+ return ref_count_.load(std::memory_order_acquire) == 0;
+ }
+
+ // Returns the current reference count (with no barriers). This is subtle, and
+ // should be used only for debugging.
+ int SubtleRefCountForDebug() const {
+ return ref_count_.load(std::memory_order_relaxed);
+ }
+
+ private:
+ std::atomic_int ref_count_;
+};
+
+} // namespace base
+
+#endif // BASE_ATOMIC_REF_COUNT_H_
diff --git a/security/sandbox/chromium/base/atomic_sequence_num.h b/security/sandbox/chromium/base/atomic_sequence_num.h
new file mode 100644
index 0000000000..717e37a60b
--- /dev/null
+++ b/security/sandbox/chromium/base/atomic_sequence_num.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ATOMIC_SEQUENCE_NUM_H_
+#define BASE_ATOMIC_SEQUENCE_NUM_H_
+
+#include <atomic>
+
+#include "base/macros.h"
+
+namespace base {
+
+// AtomicSequenceNumber is a thread safe increasing sequence number generator.
+// Its constructor doesn't emit a static initializer, so it's safe to use as a
+// global variable or static member.
+class AtomicSequenceNumber {
+ public:
+ constexpr AtomicSequenceNumber() = default;
+
+ // Returns an increasing sequence number starting from 0 for each call.
+ // This function can be called from any thread without data race.
+ inline int GetNext() { return seq_.fetch_add(1, std::memory_order_relaxed); }
+
+ private:
+ std::atomic_int seq_{0};
+
+ DISALLOW_COPY_AND_ASSIGN(AtomicSequenceNumber);
+};
+
+} // namespace base
+
+#endif // BASE_ATOMIC_SEQUENCE_NUM_H_
diff --git a/security/sandbox/chromium/base/atomicops.h b/security/sandbox/chromium/base/atomicops.h
new file mode 100644
index 0000000000..429e2457fc
--- /dev/null
+++ b/security/sandbox/chromium/base/atomicops.h
@@ -0,0 +1,150 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// For atomic operations on reference counts, see atomic_refcount.h.
+// For atomic operations on sequence numbers, see atomic_sequence_num.h.
+
+// The routines exported by this module are subtle. If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain. If
+// you plan to use these routines, you should have a good reason, such as solid
+// evidence that performance would otherwise suffer, or there being no
+// alternative. You should assume only properties explicitly guaranteed by the
+// specifications in this file. You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other architectures will cause your code to break. If you
+// do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines. The NoBarrier
+// versions are provided when no barriers are needed:
+// NoBarrier_Store()
+// NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use these.
+//
+
+#ifndef BASE_ATOMICOPS_H_
+#define BASE_ATOMICOPS_H_
+
+#include <stdint.h>
+
+// Small C++ header which defines implementation specific macros used to
+// identify the STL implementation.
+// - libc++: captures __config for _LIBCPP_VERSION
+// - libstdc++: captures bits/c++config.h for __GLIBCXX__
+#include <cstddef>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace subtle {
+
+typedef int32_t Atomic32;
+#ifdef ARCH_CPU_64_BITS
+// We need to be able to go between Atomic64 and AtomicWord implicitly. This
+// means Atomic64 and AtomicWord should be the same type on 64-bit.
+#if defined(__ILP32__) || defined(OS_NACL)
+// NaCl's intptr_t is not actually 64-bits on 64-bit!
+// http://code.google.com/p/nativeclient/issues/detail?id=1162
+typedef int64_t Atomic64;
+#else
+typedef intptr_t Atomic64;
+#endif
+#endif
+
+// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+typedef intptr_t AtomicWord;
+
+// Atomically execute:
+// result = *ptr;
+// if (*ptr == old_value)
+// *ptr = new_value;
+// return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment);
+
+// These following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions. "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics.
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
+
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef ARCH_CPU_64_BITS
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif // ARCH_CPU_64_BITS
+
+} // namespace subtle
+} // namespace base
+
+#if defined(OS_WIN) && defined(ARCH_CPU_X86_FAMILY)
+// TODO(jfb): Try to use base/atomicops_internals_portable.h everywhere.
+// https://crbug.com/559247.
+# include "base/atomicops_internals_x86_msvc.h"
+#else
+# include "base/atomicops_internals_portable.h"
+#endif
+
+// On some platforms we need additional declarations to make
+// AtomicWord compatible with our other Atomic* types.
+#if defined(OS_MACOSX) || defined(OS_OPENBSD)
+#include "base/atomicops_internals_atomicword_compat.h"
+#endif
+
+#endif // BASE_ATOMICOPS_H_
diff --git a/security/sandbox/chromium/base/atomicops_internals_portable.h b/security/sandbox/chromium/base/atomicops_internals_portable.h
new file mode 100644
index 0000000000..3b75be32c4
--- /dev/null
+++ b/security/sandbox/chromium/base/atomicops_internals_portable.h
@@ -0,0 +1,219 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// This implementation uses C++11 atomics' member functions. The code base is
+// currently written assuming atomicity revolves around accesses instead of
+// C++11's memory locations. The burden is on the programmer to ensure that all
+// memory locations accessed atomically are never accessed non-atomically (tsan
+// should help with this).
+//
+// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
+// locations as truly atomic. See the static_assert below.
+//
+// Of note in this implementation:
+// * All NoBarrier variants are implemented as relaxed.
+// * All Barrier variants are implemented as sequentially-consistent.
+// * Compare exchange's failure ordering is always the same as the success one
+// (except for release, which fails as relaxed): using a weaker ordering is
+// only valid under certain uses of compare exchange.
+// * Acquire store doesn't exist in the C11 memory model, it is instead
+// implemented as a relaxed store followed by a sequentially consistent
+// fence.
+// * Release load doesn't exist in the C11 memory model, it is instead
+// implemented as sequentially consistent fence followed by a relaxed load.
+// * Atomic increment is expected to return the post-incremented value, whereas
+// C11 fetch add returns the previous value. The implementation therefore
+// needs to increment twice (which the compiler should be able to detect and
+// optimize).
+
+#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+
+#include <atomic>
+
+#include "build/build_config.h"
+
+namespace base {
+namespace subtle {
+
+// This implementation is transitional and maintains the original API for
+// atomicops.h. This requires casting memory locations to the atomic types, and
+// assumes that the API and the C++11 implementation are layout-compatible,
+// which isn't true for all implementations or hardware platforms. The static
+// assertion should detect this issue, were it to fire then this header
+// shouldn't be used.
+//
+// TODO(jfb) If this header manages to stay committed then the API should be
+// modified, and all call sites updated.
+typedef volatile std::atomic<Atomic32>* AtomicLocation32;
+static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
+ "incompatible 32-bit atomic layout");
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ ((AtomicLocation32)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return ((AtomicLocation32)ptr)
+ ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment +
+ ((AtomicLocation32)ptr)
+ ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ ((AtomicLocation32)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_acquire,
+ std::memory_order_acquire);
+ return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ ((AtomicLocation32)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_release,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+typedef volatile std::atomic<Atomic64>* AtomicLocation64;
+static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
+ "incompatible 64-bit atomic layout");
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ ((AtomicLocation64)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return ((AtomicLocation64)ptr)
+ ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment +
+ ((AtomicLocation64)ptr)
+ ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ ((AtomicLocation64)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_acquire,
+ std::memory_order_acquire);
+ return old_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ ((AtomicLocation64)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_release,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+
+#endif // defined(ARCH_CPU_64_BITS)
+} // namespace subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
diff --git a/security/sandbox/chromium/base/atomicops_internals_x86_msvc.h b/security/sandbox/chromium/base/atomicops_internals_x86_msvc.h
new file mode 100644
index 0000000000..d9846f64b8
--- /dev/null
+++ b/security/sandbox/chromium/base/atomicops_internals_x86_msvc.h
@@ -0,0 +1,179 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include "base/win/windows_types.h"
+
+#include <intrin.h>
+
+#include <atomic>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace subtle {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ LONG result = _InterlockedCompareExchange(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value),
+ static_cast<LONG>(old_value));
+ return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ LONG result = _InterlockedExchange(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value));
+ return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return _InterlockedExchangeAdd(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(increment)) + increment;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+ // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ PVOID result = _InterlockedCompareExchangePointer(
+ reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+ return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ PVOID result =
+ _InterlockedExchangePointer(reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value));
+ return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return _InterlockedExchangeAdd64(reinterpret_cast<volatile LONGLONG*>(ptr),
+ static_cast<LONGLONG>(increment)) +
+ increment;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+
+ // When new chips come out, check:
+ // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+ // System Programming Guide, Chapter 7: Multiple-processor management,
+ // Section 7.2, Memory Ordering.
+ // Last seen at:
+ // http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+
+#endif // defined(_WIN64)
+
+} // namespace subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/security/sandbox/chromium/base/base_export.h b/security/sandbox/chromium/base/base_export.h
new file mode 100644
index 0000000000..cf7ebd7816
--- /dev/null
+++ b/security/sandbox/chromium/base/base_export.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_EXPORT_H_
+#define BASE_BASE_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(BASE_IMPLEMENTATION)
+#define BASE_EXPORT __declspec(dllexport)
+#else
+#define BASE_EXPORT __declspec(dllimport)
+#endif // defined(BASE_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(BASE_IMPLEMENTATION)
+#define BASE_EXPORT __attribute__((visibility("default")))
+#else
+#define BASE_EXPORT
+#endif // defined(BASE_IMPLEMENTATION)
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define BASE_EXPORT
+#endif
+
+#endif // BASE_BASE_EXPORT_H_
diff --git a/security/sandbox/chromium/base/base_paths.h b/security/sandbox/chromium/base/base_paths.h
new file mode 100644
index 0000000000..2a163f48d4
--- /dev/null
+++ b/security/sandbox/chromium/base/base_paths.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_H_
+#define BASE_BASE_PATHS_H_
+
+// This file declares path keys for the base module. These can be used with
+// the PathService to access various special directories and files.
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/base_paths_win.h"
+#elif defined(OS_MACOSX)
+#include "base/base_paths_mac.h"
+#elif defined(OS_ANDROID)
+#include "base/base_paths_android.h"
+#endif
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "base/base_paths_posix.h"
+#endif
+
+namespace base {
+
+enum BasePathKey {
+ PATH_START = 0,
+
+ DIR_CURRENT, // Current directory.
+ DIR_EXE, // Directory containing FILE_EXE.
+ DIR_MODULE, // Directory containing FILE_MODULE.
+ DIR_ASSETS, // Directory that contains application assets.
+ DIR_TEMP, // Temporary directory.
+ DIR_HOME, // User's root home directory. On Windows this will look
+ // like "C:\Users\<user>" which isn't necessarily a great
+ // place to put files.
+ FILE_EXE, // Path and filename of the current executable.
+ FILE_MODULE, // Path and filename of the module containing the code for
+ // the PathService (which could differ from FILE_EXE if the
+ // PathService were compiled into a shared object, for
+ // example).
+ DIR_SOURCE_ROOT, // Returns the root of the source tree. This key is useful
+ // for tests that need to locate various resources. It
+ // should not be used outside of test code.
+ DIR_USER_DESKTOP, // The current user's Desktop.
+
+ DIR_TEST_DATA, // Used only for testing.
+
+ PATH_END
+};
+
+} // namespace base
+
+#endif // BASE_BASE_PATHS_H_
diff --git a/security/sandbox/chromium/base/base_paths_win.h b/security/sandbox/chromium/base/base_paths_win.h
new file mode 100644
index 0000000000..2db16a6271
--- /dev/null
+++ b/security/sandbox/chromium/base/base_paths_win.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_WIN_H_
+#define BASE_BASE_PATHS_WIN_H_
+
+// This file declares windows-specific path keys for the base module.
+// These can be used with the PathService to access various special
+// directories and files.
+
+namespace base {
+
+enum {
+ PATH_WIN_START = 100,
+
+ DIR_WINDOWS, // Windows directory, usually "c:\windows"
+ DIR_SYSTEM, // Usually "c:\windows\system32"
+ // 32-bit 32-bit on 64-bit 64-bit on 64-bit
+ // DIR_PROGRAM_FILES 1 2 1
+ // DIR_PROGRAM_FILESX86 1 2 2
+ // DIR_PROGRAM_FILES6432 1 1 1
+ // 1 - C:\Program Files 2 - C:\Program Files (x86)
+ DIR_PROGRAM_FILES, // See table above.
+ DIR_PROGRAM_FILESX86, // See table above.
+ DIR_PROGRAM_FILES6432, // See table above.
+
+ DIR_IE_INTERNET_CACHE, // Temporary Internet Files directory.
+ DIR_COMMON_START_MENU, // Usually "C:\ProgramData\Microsoft\Windows\
+ // Start Menu\Programs"
+ DIR_START_MENU, // Usually "C:\Users\<user>\AppData\Roaming\
+ // Microsoft\Windows\Start Menu\Programs"
+ DIR_APP_DATA, // Application Data directory under the user
+ // profile.
+ DIR_LOCAL_APP_DATA, // "Local Settings\Application Data" directory
+ // under the user profile.
+ DIR_COMMON_APP_DATA, // Usually "C:\ProgramData".
+ DIR_APP_SHORTCUTS, // Where tiles on the start screen are stored,
+ // only for Windows 8. Maps to "Local\AppData\
+ // Microsoft\Windows\Application Shortcuts\".
+ DIR_COMMON_DESKTOP, // Directory for the common desktop (visible
+ // on all users' desktops).
+ DIR_USER_QUICK_LAUNCH, // Directory for the quick launch shortcuts.
+ DIR_TASKBAR_PINS, // Directory for the shortcuts pinned to taskbar.
+ DIR_IMPLICIT_APP_SHORTCUTS, // The implicit user pinned shortcut directory.
+ DIR_WINDOWS_FONTS, // Usually C:\Windows\Fonts.
+
+ PATH_WIN_END
+};
+
+} // namespace base
+
+#endif // BASE_BASE_PATHS_WIN_H_
diff --git a/security/sandbox/chromium/base/base_switches.cc b/security/sandbox/chromium/base/base_switches.cc
new file mode 100644
index 0000000000..6a47487961
--- /dev/null
+++ b/security/sandbox/chromium/base/base_switches.cc
@@ -0,0 +1,149 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base_switches.h"
+#include "build/build_config.h"
+
+namespace switches {
+
+// Delays execution of TaskPriority::BEST_EFFORT tasks until shutdown.
+const char kDisableBestEffortTasks[] = "disable-best-effort-tasks";
+
+// Disables the crash reporting.
+const char kDisableBreakpad[] = "disable-breakpad";
+
+// Comma-separated list of feature names to disable. See also kEnableFeatures.
+const char kDisableFeatures[] = "disable-features";
+
+// Force disabling of low-end device mode when set.
+const char kDisableLowEndDeviceMode[] = "disable-low-end-device-mode";
+
+// Indicates that crash reporting should be enabled. On platforms where helper
+// processes cannot access the files needed to make this decision, this flag is
+// generated internally.
+const char kEnableCrashReporter[] = "enable-crash-reporter";
+
+// Comma-separated list of feature names to enable. See also kDisableFeatures.
+const char kEnableFeatures[] = "enable-features";
+
+// Force low-end device mode when set.
+const char kEnableLowEndDeviceMode[] = "enable-low-end-device-mode";
+
+// This option can be used to force field trials when testing changes locally.
+// The argument is a list of name and value pairs, separated by slashes. If a
+// trial name is prefixed with an asterisk, that trial will start activated.
+// For example, the following argument defines two trials, with the second one
+// activated: "GoogleNow/Enable/*MaterialDesignNTP/Default/" This option can
+// also be used by the browser process to send the list of trials to a
+// non-browser process, using the same format. See
+// FieldTrialList::CreateTrialsFromString() in field_trial.h for details.
+const char kForceFieldTrials[] = "force-fieldtrials";
+
+// Generates full memory crash dump.
+const char kFullMemoryCrashReport[] = "full-memory-crash-report";
+
+// Logs information about all tasks posted with TaskPriority::BEST_EFFORT. Use
+// this to diagnose issues that are thought to be caused by
+// TaskPriority::BEST_EFFORT execution fences. Note: Tasks posted to a
+// non-BEST_EFFORT UpdateableSequencedTaskRunner whose priority is later lowered
+// to BEST_EFFORT are not logged.
+const char kLogBestEffortTasks[] = "log-best-effort-tasks";
+
+// Suppresses all error dialogs when present.
+const char kNoErrorDialogs[] = "noerrdialogs";
+
+// Starts the sampling based profiler for the browser process at startup. This
+// will only work if chrome has been built with the gn arg enable_profiling =
+// true. The output will go to the value of kProfilingFile.
+const char kProfilingAtStart[] = "profiling-at-start";
+
+// Specifies a location for profiling output. This will only work if chrome has
+// been built with the gyp variable profiling=1 or gn arg enable_profiling=true.
+//
+// {pid} if present will be replaced by the pid of the process.
+// {count} if present will be incremented each time a profile is generated
+// for this process.
+// The default is chrome-profile-{pid} for the browser and test-profile-{pid}
+// for tests.
+const char kProfilingFile[] = "profiling-file";
+
+// Controls whether profile data is periodically flushed to a file. Normally
+// the data gets written on exit but cases exist where chromium doesn't exit
+// cleanly (especially when using single-process). A time in seconds can be
+// specified.
+const char kProfilingFlush[] = "profiling-flush";
+
+// When running certain tests that spawn child processes, this switch indicates
+// to the test framework that the current process is a child process.
+const char kTestChildProcess[] = "test-child-process";
+
+// When running certain tests that spawn child processes, this switch indicates
+// to the test framework that the current process should not initialize ICU to
+// avoid creating any scoped handles too early in startup.
+const char kTestDoNotInitializeIcu[] = "test-do-not-initialize-icu";
+
+// Sends trace events from these categories to a file.
+// --trace-to-file on its own sends to default categories.
+const char kTraceToFile[] = "trace-to-file";
+
+// Specifies the file name for --trace-to-file. If unspecified, it will
+// go to a default file name.
+const char kTraceToFileName[] = "trace-to-file-name";
+
+// Gives the default maximal active V-logging level; 0 is the default.
+// Normally positive values are used for V-logging levels.
+const char kV[] = "v";
+
+// Gives the per-module maximal V-logging levels to override the value
+// given by --v. E.g. "my_module=2,foo*=3" would change the logging
+// level for all code in source files "my_module.*" and "foo*.*"
+// ("-inl" suffixes are also disregarded for this matching).
+//
+// Any pattern containing a forward or backward slash will be tested
+// against the whole pathname and not just the module. E.g.,
+// "*/foo/bar/*=2" would change the logging level for all code in
+// source files under a "foo/bar" directory.
+const char kVModule[] = "vmodule";
+
+// Will wait for 60 seconds for a debugger to come to attach to the process.
+const char kWaitForDebugger[] = "wait-for-debugger";
+
+#if defined(OS_WIN)
+// Disable high-resolution timer on Windows.
+const char kDisableHighResTimer[] = "disable-highres-timer";
+
+// Disables the USB keyboard detection for blocking the OSK on Win8+.
+const char kDisableUsbKeyboardDetect[] = "disable-usb-keyboard-detect";
+#endif
+
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+// The /dev/shm partition is too small in certain VM environments, causing
+// Chrome to fail or crash (see http://crbug.com/715363). Use this flag to
+// work-around this issue (a temporary directory will always be used to create
+// anonymous shared memory files).
+const char kDisableDevShmUsage[] = "disable-dev-shm-usage";
+#endif
+
+#if defined(OS_POSIX)
+// Used for turning on Breakpad crash reporting in a debug environment where
+// crash reporting is typically compiled but disabled.
+const char kEnableCrashReporterForTesting[] =
+ "enable-crash-reporter-for-testing";
+#endif
+
+#if defined(OS_ANDROID)
+// Enables the reached code profiler that samples all threads in all processes
+// to determine which functions are almost never executed.
+const char kEnableReachedCodeProfiler[] = "enable-reached-code-profiler";
+#endif
+
+#if defined(OS_LINUX)
+// Controls whether or not retired instruction counts are surfaced for threads
+// in trace events on Linux.
+//
+// This flag requires the BPF sandbox to be disabled.
+const char kEnableThreadInstructionCount[] = "enable-thread-instruction-count";
+#endif
+
+} // namespace switches
diff --git a/security/sandbox/chromium/base/base_switches.h b/security/sandbox/chromium/base/base_switches.h
new file mode 100644
index 0000000000..b1923efc1e
--- /dev/null
+++ b/security/sandbox/chromium/base/base_switches.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the "base" command-line switches.
+
+#ifndef BASE_BASE_SWITCHES_H_
+#define BASE_BASE_SWITCHES_H_
+
+#include "build/build_config.h"
+
+namespace switches {
+
+extern const char kDisableBestEffortTasks[];
+extern const char kDisableBreakpad[];
+extern const char kDisableFeatures[];
+extern const char kDisableLowEndDeviceMode[];
+extern const char kEnableCrashReporter[];
+extern const char kEnableFeatures[];
+extern const char kEnableLowEndDeviceMode[];
+extern const char kForceFieldTrials[];
+extern const char kFullMemoryCrashReport[];
+extern const char kLogBestEffortTasks[];
+extern const char kNoErrorDialogs[];
+extern const char kProfilingAtStart[];
+extern const char kProfilingFile[];
+extern const char kProfilingFlush[];
+extern const char kTestChildProcess[];
+extern const char kTestDoNotInitializeIcu[];
+extern const char kTraceToFile[];
+extern const char kTraceToFileName[];
+extern const char kV[];
+extern const char kVModule[];
+extern const char kWaitForDebugger[];
+
+#if defined(OS_WIN)
+extern const char kDisableHighResTimer[];
+extern const char kDisableUsbKeyboardDetect[];
+#endif
+
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+extern const char kDisableDevShmUsage[];
+#endif
+
+#if defined(OS_POSIX)
+extern const char kEnableCrashReporterForTesting[];
+#endif
+
+#if defined(OS_ANDROID)
+extern const char kEnableReachedCodeProfiler[];
+extern const char kOrderfileMemoryOptimization[];
+#endif
+
+#if defined(OS_LINUX)
+extern const char kEnableThreadInstructionCount[];
+#endif
+
+} // namespace switches
+
+#endif // BASE_BASE_SWITCHES_H_
diff --git a/security/sandbox/chromium/base/bind.h b/security/sandbox/chromium/base/bind.h
new file mode 100644
index 0000000000..0bbc2aceb1
--- /dev/null
+++ b/security/sandbox/chromium/base/bind.h
@@ -0,0 +1,470 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIND_H_
+#define BASE_BIND_H_
+
+#include <functional>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "base/bind_internal.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX) && !HAS_FEATURE(objc_arc)
+#include "base/mac/scoped_block.h"
+#endif
+
+// -----------------------------------------------------------------------------
+// Usage documentation
+// -----------------------------------------------------------------------------
+//
+// Overview:
+// base::BindOnce() and base::BindRepeating() are helpers for creating
+// base::OnceCallback and base::RepeatingCallback objects respectively.
+//
+// For a runnable object of n-arity, the base::Bind*() family allows partial
+// application of the first m arguments. The remaining n - m arguments must be
+// passed when invoking the callback with Run().
+//
+// // The first argument is bound at callback creation; the remaining
+// // two must be passed when calling Run() on the callback object.
+// base::OnceCallback<long(int, long)> cb = base::BindOnce(
+// [](short x, int y, long z) { return x * y * z; }, 42);
+//
+// When binding to a method, the receiver object must also be specified at
+// callback creation time. When Run() is invoked, the method will be invoked on
+// the specified receiver object.
+//
+// class C : public base::RefCounted<C> { void F(); };
+// auto instance = base::MakeRefCounted<C>();
+// auto cb = base::BindOnce(&C::F, instance);
+// std::move(cb).Run(); // Identical to instance->F()
+//
+// base::Bind is currently a type alias for base::BindRepeating(). In the
+// future, we expect to flip this to default to base::BindOnce().
+//
+// See //docs/callback.md for the full documentation.
+//
+// -----------------------------------------------------------------------------
+// Implementation notes
+// -----------------------------------------------------------------------------
+//
+// If you're reading the implementation, before proceeding further, you should
+// read the top comment of base/bind_internal.h for a definition of common
+// terms and concepts.
+
+namespace base {
+
+namespace internal {
+
+// IsOnceCallback<T> is a std::true_type if |T| is a OnceCallback.
+// (RepeatingCallback and all other types fall through to the false_type
+// primary template.)
+template <typename T>
+struct IsOnceCallback : std::false_type {};
+
+template <typename Signature>
+struct IsOnceCallback<OnceCallback<Signature>> : std::true_type {};
+
+// Helper to assert that parameter |i| of type |Arg| can be bound, which means:
+// - |Arg| can be retained internally as |Storage|.
+// - |Arg| can be forwarded as |Unwrapped| to |Param|.
+template <size_t i,
+          typename Arg,
+          typename Storage,
+          typename Unwrapped,
+          typename Param>
+struct AssertConstructible {
+ private:
+  static constexpr bool param_is_forwardable =
+      std::is_constructible<Param, Unwrapped>::value;
+  // Unlike the check for binding into storage below, the check for
+  // forwardability drops the const qualifier for repeating callbacks. This is
+  // to try to catch instances where std::move()--which forwards as a const
+  // reference with repeating callbacks--is used instead of base::Passed().
+  static_assert(
+      param_is_forwardable ||
+          !std::is_constructible<Param, std::decay_t<Unwrapped>&&>::value,
+      "Bound argument |i| is move-only but will be forwarded by copy. "
+      "Ensure |Arg| is bound using base::Passed(), not std::move().");
+  static_assert(
+      param_is_forwardable,
+      "Bound argument |i| of type |Arg| cannot be forwarded as "
+      "|Unwrapped| to the bound functor, which declares it as |Param|.");
+
+  static constexpr bool arg_is_storable =
+      std::is_constructible<Storage, Arg>::value;
+  static_assert(arg_is_storable ||
+                    !std::is_constructible<Storage, std::decay_t<Arg>&&>::value,
+                "Bound argument |i| is move-only but will be bound by copy. "
+                "Ensure |Arg| is mutable and bound using std::move().");
+  static_assert(arg_is_storable,
+                "Bound argument |i| of type |Arg| cannot be converted and "
+                "bound as |Storage|.");
+};
+
+// Takes three same-length TypeLists, and applies AssertConstructible for each
+// triple of corresponding elements.
+template <typename Index,
+          typename Args,
+          typename UnwrappedTypeList,
+          typename ParamsList>
+struct AssertBindArgsValidity;
+
+template <size_t... Ns,
+          typename... Args,
+          typename... Unwrapped,
+          typename... Params>
+struct AssertBindArgsValidity<std::index_sequence<Ns...>,
+                              TypeList<Args...>,
+                              TypeList<Unwrapped...>,
+                              TypeList<Params...>>
+    : AssertConstructible<Ns, Args, std::decay_t<Args>, Unwrapped, Params>... {
+  // Always true; the real diagnostics are the static_asserts that fire while
+  // the AssertConstructible base classes above are instantiated.
+  static constexpr bool ok = true;
+};
+
+// The implementation of TransformToUnwrappedType below.
+template <bool is_once, typename T>
+struct TransformToUnwrappedTypeImpl;
+
+// Once flavor: bound state is forwarded as an rvalue, since Run() executes at
+// most once and may consume the stored argument.
+template <typename T>
+struct TransformToUnwrappedTypeImpl<true, T> {
+  using StoredType = std::decay_t<T>;
+  using ForwardType = StoredType&&;
+  using Unwrapped = decltype(Unwrap(std::declval<ForwardType>()));
+};
+
+// Repeating flavor: bound state is forwarded as a const reference, since it
+// must stay intact for subsequent Run() invocations.
+template <typename T>
+struct TransformToUnwrappedTypeImpl<false, T> {
+  using StoredType = std::decay_t<T>;
+  using ForwardType = const StoredType&;
+  using Unwrapped = decltype(Unwrap(std::declval<ForwardType>()));
+};
+
+// Transform |T| into `Unwrapped` type, which is passed to the target function.
+// Example:
+//   In is_once == true case,
+//     `int&&` -> `int&&`,
+//     `const int&` -> `int&&`,
+//     `OwnedWrapper<int>&` -> `int*&&`.
+//   In is_once == false case,
+//     `int&&` -> `const int&`,
+//     `const int&` -> `const int&`,
+//     `OwnedWrapper<int>&` -> `int* const &`.
+template <bool is_once, typename T>
+using TransformToUnwrappedType =
+    typename TransformToUnwrappedTypeImpl<is_once, T>::Unwrapped;
+
+// Transforms |Args| into `Unwrapped` types, and packs them into a TypeList.
+// If |is_method| is true, tries to dereference the first argument to support
+// smart pointers.
+template <bool is_once, bool is_method, typename... Args>
+struct MakeUnwrappedTypeListImpl {
+  using Type = TypeList<TransformToUnwrappedType<is_once, Args>...>;
+};
+
+// Performs special handling for this pointers.
+// Example:
+//   int* -> int*,
+//   std::unique_ptr<int> -> int*.
+template <bool is_once, typename Receiver, typename... Args>
+struct MakeUnwrappedTypeListImpl<is_once, true, Receiver, Args...> {
+  using UnwrappedReceiver = TransformToUnwrappedType<is_once, Receiver>;
+  // `&*receiver` normalizes both raw and smart pointer receivers to a raw
+  // pointer type.
+  using Type = TypeList<decltype(&*std::declval<UnwrappedReceiver>()),
+                        TransformToUnwrappedType<is_once, Args>...>;
+};
+
+template <bool is_once, bool is_method, typename... Args>
+using MakeUnwrappedTypeList =
+    typename MakeUnwrappedTypeListImpl<is_once, is_method, Args...>::Type;
+
+// Used below in BindImpl to determine whether to use Invoker::Run or
+// Invoker::RunOnce.
+// Note: Simply using `kIsOnce ? &Invoker::RunOnce : &Invoker::Run` does not
+// work, since the compiler needs to check whether both expressions are
+// well-formed. Using `Invoker::Run` with a OnceCallback triggers a
+// static_assert, which is why the ternary expression does not compile.
+// TODO(crbug.com/752720): Remove this indirection once we have `if constexpr`.
+template <typename Invoker>
+constexpr auto GetInvokeFunc(std::true_type) {
+  return Invoker::RunOnce;
+}
+
+template <typename Invoker>
+constexpr auto GetInvokeFunc(std::false_type) {
+  return Invoker::Run;
+}
+
+template <template <typename> class CallbackT,
+          typename Functor,
+          typename... Args>
+decltype(auto) BindImpl(Functor&& functor, Args&&... args) {
+  // This block checks if each |args| matches to the corresponding params of the
+  // target function. This check does not affect the behavior of Bind, but its
+  // error message should be more readable.
+  // Note: CallbackT<void()> is only a probe; any signature works for detecting
+  // whether CallbackT is the Once or the Repeating flavor.
+  static constexpr bool kIsOnce = IsOnceCallback<CallbackT<void()>>::value;
+  using Helper = internal::BindTypeHelper<Functor, Args...>;
+  using FunctorTraits = typename Helper::FunctorTraits;
+  using BoundArgsList = typename Helper::BoundArgsList;
+  using UnwrappedArgsList =
+      internal::MakeUnwrappedTypeList<kIsOnce, FunctorTraits::is_method,
+                                      Args&&...>;
+  using BoundParamsList = typename Helper::BoundParamsList;
+  static_assert(internal::AssertBindArgsValidity<
+                    std::make_index_sequence<Helper::num_bounds>, BoundArgsList,
+                    UnwrappedArgsList, BoundParamsList>::ok,
+                "The bound args need to be convertible to the target params.");
+
+  using BindState = internal::MakeBindStateType<Functor, Args...>;
+  using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
+  using Invoker = internal::Invoker<BindState, UnboundRunType>;
+  using CallbackType = CallbackT<UnboundRunType>;
+
+  // Store the invoke func into PolymorphicInvoke before casting it to
+  // InvokeFuncStorage, so that we can ensure its type matches to
+  // PolymorphicInvoke, to which CallbackType will cast back.
+  using PolymorphicInvoke = typename CallbackType::PolymorphicInvoke;
+  PolymorphicInvoke invoke_func =
+      GetInvokeFunc<Invoker>(std::integral_constant<bool, kIsOnce>());
+
+  using InvokeFuncStorage = internal::BindStateBase::InvokeFuncStorage;
+  return CallbackType(BindState::Create(
+      reinterpret_cast<InvokeFuncStorage>(invoke_func),
+      std::forward<Functor>(functor), std::forward<Args>(args)...));
+}
+
+} // namespace internal
+
+// Bind as OnceCallback.
+template <typename Functor, typename... Args>
+inline OnceCallback<MakeUnboundRunType<Functor, Args...>> BindOnce(
+    Functor&& functor,
+    Args&&... args) {
+  // A OnceCallback may itself be bound as the functor, but only by
+  // transferring it in (non-const rvalue), since running it will consume it.
+  static_assert(!internal::IsOnceCallback<std::decay_t<Functor>>() ||
+                    (std::is_rvalue_reference<Functor&&>() &&
+                     !std::is_const<std::remove_reference_t<Functor>>()),
+                "BindOnce requires non-const rvalue for OnceCallback binding."
+                " I.e.: base::BindOnce(std::move(callback)).");
+
+  return internal::BindImpl<OnceCallback>(std::forward<Functor>(functor),
+                                          std::forward<Args>(args)...);
+}
+
+// Bind as RepeatingCallback.
+template <typename Functor, typename... Args>
+inline RepeatingCallback<MakeUnboundRunType<Functor, Args...>>
+BindRepeating(Functor&& functor, Args&&... args) {
+  static_assert(
+      !internal::IsOnceCallback<std::decay_t<Functor>>(),
+      "BindRepeating cannot bind OnceCallback. Use BindOnce with std::move().");
+
+  return internal::BindImpl<RepeatingCallback>(std::forward<Functor>(functor),
+                                               std::forward<Args>(args)...);
+}
+
+// Unannotated Bind.
+// TODO(tzik): Deprecate this and migrate to OnceCallback and
+// RepeatingCallback, once they get ready.
+template <typename Functor, typename... Args>
+inline Callback<MakeUnboundRunType<Functor, Args...>>
+Bind(Functor&& functor, Args&&... args) {
+  return base::BindRepeating(std::forward<Functor>(functor),
+                             std::forward<Args>(args)...);
+}
+
+// Special cases for binding to a base::Callback without extra bound arguments.
+// These overloads return the callback unchanged rather than wrapping it in an
+// additional BindState layer.
+template <typename Signature>
+OnceCallback<Signature> BindOnce(OnceCallback<Signature> callback) {
+  return callback;
+}
+
+template <typename Signature>
+OnceCallback<Signature> BindOnce(RepeatingCallback<Signature> callback) {
+  return callback;
+}
+
+template <typename Signature>
+RepeatingCallback<Signature> BindRepeating(
+    RepeatingCallback<Signature> callback) {
+  return callback;
+}
+
+template <typename Signature>
+Callback<Signature> Bind(Callback<Signature> callback) {
+  return callback;
+}
+
+// Unretained() allows binding a non-refcounted class, and to disable
+// refcounting on arguments that are refcounted objects.
+//
+// EXAMPLE OF Unretained():
+//
+//   class Foo {
+//    public:
+//     void func() { cout << "Foo:f" << endl; }
+//   };
+//
+//   // In some function somewhere.
+//   Foo foo;
+//   OnceClosure foo_callback =
+//       BindOnce(&Foo::func, Unretained(&foo));
+//   std::move(foo_callback).Run();  // Prints "Foo:f".
+//
+// Without the Unretained() wrapper on |&foo|, the above call would fail
+// to compile because Foo does not support the AddRef() and Release() methods.
+//
+// Note: only a raw pointer is stored (see internal::UnretainedWrapper), so the
+// caller is responsible for ensuring |o| outlives the callback.
+template <typename T>
+static inline internal::UnretainedWrapper<T> Unretained(T* o) {
+  return internal::UnretainedWrapper<T>(o);
+}
+
+// RetainedRef() accepts a ref counted object and retains a reference to it.
+// When the callback is called, the object is passed as a raw pointer.
+//
+// EXAMPLE OF RetainedRef():
+//
+//    void foo(RefCountedBytes* bytes) {}
+//
+//    scoped_refptr<RefCountedBytes> bytes = ...;
+//    OnceClosure callback = BindOnce(&foo, base::RetainedRef(bytes));
+//    std::move(callback).Run();
+//
+// Without RetainedRef, the scoped_refptr would try to implicitly convert to
+// a raw pointer and fail compilation:
+//
+//    OnceClosure callback = BindOnce(&foo, bytes); // ERROR!
+template <typename T>
+static inline internal::RetainedRefWrapper<T> RetainedRef(T* o) {
+  return internal::RetainedRefWrapper<T>(o);
+}
+template <typename T>
+static inline internal::RetainedRefWrapper<T> RetainedRef(scoped_refptr<T> o) {
+  return internal::RetainedRefWrapper<T>(std::move(o));
+}
+
+// Owned() transfers ownership of an object to the callback resulting from
+// bind; the object will be deleted when the callback is deleted.
+//
+// EXAMPLE OF Owned():
+//
+//   void foo(int* arg) { cout << *arg << endl }
+//
+//   int* pn = new int(1);
+//   RepeatingClosure foo_callback = BindRepeating(&foo, Owned(pn));
+//
+//   foo_callback.Run();  // Prints "1"
+//   foo_callback.Run();  // Prints "1"
+//   *pn = 2;
+//   foo_callback.Run();  // Prints "2"
+//
+//   foo_callback.Reset();  // |pn| is deleted.  Also will happen when
+//                          // |foo_callback| goes out of scope.
+//
+// Without Owned(), someone would have to know to delete |pn| when the last
+// reference to the callback is deleted.
+template <typename T>
+static inline internal::OwnedWrapper<T> Owned(T* o) {
+  return internal::OwnedWrapper<T>(o);
+}
+
+// Overload that takes ownership from an existing std::unique_ptr, including
+// ones with a custom deleter.
+template <typename T, typename Deleter>
+static inline internal::OwnedWrapper<T, Deleter> Owned(
+    std::unique_ptr<T, Deleter>&& ptr) {
+  return internal::OwnedWrapper<T, Deleter>(std::move(ptr));
+}
+
+// Passed() is for transferring movable-but-not-copyable types (eg. unique_ptr)
+// through a RepeatingCallback. Logically, this signifies a destructive transfer
+// of the state of the argument into the target function. Invoking
+// RepeatingCallback::Run() twice on a callback that was created with a Passed()
+// argument will CHECK() because the first invocation would have already
+// transferred ownership to the target function.
+//
+// Note that Passed() is not necessary with BindOnce(), as std::move() does the
+// same thing. Avoid Passed() in favor of std::move() with BindOnce().
+//
+// EXAMPLE OF Passed():
+//
+//   void TakesOwnership(std::unique_ptr<Foo> arg) { }
+//   std::unique_ptr<Foo> CreateFoo() { return std::make_unique<Foo>();
+//   }
+//
+//   auto f = std::make_unique<Foo>();
+//
+//   // |cb| is given ownership of Foo(). |f| is now NULL.
+//   // You can use std::move(f) in place of &f, but it's more verbose.
+//   RepeatingClosure cb = BindRepeating(&TakesOwnership, Passed(&f));
+//
+//   // Run was never called so |cb| still owns Foo() and deletes
+//   // it on Reset().
+//   cb.Reset();
+//
+//   // |cb| is given a new Foo created by CreateFoo().
+//   cb = BindRepeating(&TakesOwnership, Passed(CreateFoo()));
+//
+//   // |arg| in TakesOwnership() is given ownership of Foo(). |cb|
+//   // no longer owns Foo() and, if reset, would not delete Foo().
+//   cb.Run();  // Foo() is now transferred to |arg| and deleted.
+//   cb.Run();  // This CHECK()s since Foo() has already been used once.
+//
+// We offer 2 syntaxes for calling Passed(). The first takes an rvalue and is
+// best suited for use with the return value of a function or other temporary
+// rvalues. The second takes a pointer to the scoper and is just syntactic sugar
+// to avoid having to write Passed(std::move(scoper)).
+//
+// Both versions of Passed() prevent T from being an lvalue reference. The first
+// via use of enable_if, and the second takes a T* which will not bind to T&.
+template <typename T,
+          std::enable_if_t<!std::is_lvalue_reference<T>::value>* = nullptr>
+static inline internal::PassedWrapper<T> Passed(T&& scoper) {
+  return internal::PassedWrapper<T>(std::move(scoper));
+}
+template <typename T>
+static inline internal::PassedWrapper<T> Passed(T* scoper) {
+  return internal::PassedWrapper<T>(std::move(*scoper));
+}
+
+// IgnoreResult() is used to adapt a function or callback with a return type to
+// one with a void return. This is most useful if you have a function with,
+// say, a pesky ignorable bool return that you want to use with PostTask or
+// something else that expects a callback with a void return.
+//
+// EXAMPLE OF IgnoreResult():
+//
+//   int DoSomething(int arg) { cout << arg << endl; }
+//
+//   // Assign to a callback with a void return type.
+//   OnceCallback<void(int)> cb = BindOnce(IgnoreResult(&DoSomething));
+//   std::move(cb).Run(1);  // Prints "1".
+//
+//   // Prints "2" on |ml|.
+//   ml->PostTask(FROM_HERE, BindOnce(IgnoreResult(&DoSomething), 2));
+template <typename T>
+static inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
+  return internal::IgnoreResultHelper<T>(std::move(data));
+}
+
+#if defined(OS_MACOSX) && !HAS_FEATURE(objc_arc)
+
+// RetainBlock() is used to adapt an Objective-C block when Automated Reference
+// Counting (ARC) is disabled. This is unnecessary when ARC is enabled, as the
+// BindOnce and BindRepeating already support blocks then.
+//
+// EXAMPLE OF RetainBlock():
+//
+//   // Wrap the block and bind it to a callback.
+//   OnceCallback<void(int)> cb =
+//       BindOnce(RetainBlock(^(int n) { NSLog(@"%d", n); }));
+//   std::move(cb).Run(1);  // Logs "1".
+template <typename R, typename... Args>
+base::mac::ScopedBlock<R (^)(Args...)> RetainBlock(R (^block)(Args...)) {
+  // scoped_policy::RETAIN takes a reference on |block|, so the returned
+  // ScopedBlock keeps it alive for the callback's lifetime.
+  return base::mac::ScopedBlock<R (^)(Args...)>(block,
+                                                base::scoped_policy::RETAIN);
+}
+
+#endif // defined(OS_MACOSX) && !HAS_FEATURE(objc_arc)
+
+} // namespace base
+
+#endif // BASE_BIND_H_
diff --git a/security/sandbox/chromium/base/bind_helpers.h b/security/sandbox/chromium/base/bind_helpers.h
new file mode 100644
index 0000000000..37065a07ab
--- /dev/null
+++ b/security/sandbox/chromium/base/bind_helpers.h
@@ -0,0 +1,69 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIND_HELPERS_H_
+#define BASE_BIND_HELPERS_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/memory/weak_ptr.h"
+#include "build/build_config.h"
+
+// This defines a set of simple functions and utilities that people want when
+// using {Once,Repeating}Callback<> and Bind{Once,Repeating}().
+
+namespace base {
+
+// Creates a null callback.
+class BASE_EXPORT NullCallback {
+ public:
+  // Implicitly converts to a default-constructed (i.e. null) callback of
+  // whichever flavor and signature the context requires.
+  template <typename R, typename... Args>
+  operator RepeatingCallback<R(Args...)>() const {
+    return RepeatingCallback<R(Args...)>();
+  }
+  template <typename R, typename... Args>
+  operator OnceCallback<R(Args...)>() const {
+    return OnceCallback<R(Args...)>();
+  }
+};
+
+// Creates a callback that does nothing when called.
+class BASE_EXPORT DoNothing {
+ public:
+  // The implicit conversions delegate to the explicit factories below.
+  template <typename... Args>
+  operator RepeatingCallback<void(Args...)>() const {
+    return Repeatedly<Args...>();
+  }
+  template <typename... Args>
+  operator OnceCallback<void(Args...)>() const {
+    return Once<Args...>();
+  }
+  // Explicit way of specifying a specific callback type when the compiler can't
+  // deduce it.
+  template <typename... Args>
+  static RepeatingCallback<void(Args...)> Repeatedly() {
+    return BindRepeating([](Args... args) {});
+  }
+  template <typename... Args>
+  static OnceCallback<void(Args...)> Once() {
+    return BindOnce([](Args... args) {});
+  }
+};
+
+// Useful for creating a Closure that will delete a pointer when invoked. Only
+// use this when necessary. In most cases MessageLoop::DeleteSoon() is a better
+// fit.
+template <typename T>
+void DeletePointer(T* obj) {
+  delete obj;
+}
+
+} // namespace base
+
+#endif // BASE_BIND_HELPERS_H_
diff --git a/security/sandbox/chromium/base/bind_internal.h b/security/sandbox/chromium/base/bind_internal.h
new file mode 100644
index 0000000000..bed151a8dc
--- /dev/null
+++ b/security/sandbox/chromium/base/bind_internal.h
@@ -0,0 +1,1050 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIND_INTERNAL_H_
+#define BASE_BIND_INTERNAL_H_
+
+#include <stddef.h>
+
+#include <functional>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback_internal.h"
+#include "base/compiler_specific.h"
+#include "base/memory/raw_scoped_refptr_mismatch_checker.h"
+#include "base/memory/weak_ptr.h"
+#include "base/template_util.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX) && !HAS_FEATURE(objc_arc)
+#include "base/mac/scoped_block.h"
+#endif
+
+// See base/callback.h for user documentation.
+//
+//
+// CONCEPTS:
+// Functor -- A movable type representing something that should be called.
+// All function pointers and Callback<> are functors even if the
+// invocation syntax differs.
+// RunType -- A function type (as opposed to function _pointer_ type) for
+// a Callback<>::Run(). Usually just a convenience typedef.
+// (Bound)Args -- A set of types that stores the arguments.
+//
+// Types:
+// ForceVoidReturn<> -- Helper class for translating function signatures to
+// equivalent forms with a "void" return type.
+// FunctorTraits<> -- Type traits used to determine the correct RunType and
+// invocation manner for a Functor. This is where function
+// signature adapters are applied.
+//  InvokeHelper<> -- Take a Functor + arguments and actually invokes it.
+//                    Handle the differing syntaxes needed for WeakPtr<>
+//                    support. This is separate from Invoker to avoid creating
+//                    multiple versions of Invoker<>.
+// Invoker<> -- Unwraps the curried parameters and executes the Functor.
+// BindState<> -- Stores the curried parameters, and is the main entry point
+// into the Bind() system.
+
+#if defined(OS_WIN)
+namespace Microsoft {
+namespace WRL {
+template <typename>
+class ComPtr;
+} // namespace WRL
+} // namespace Microsoft
+#endif
+
+namespace base {
+
+template <typename T>
+struct IsWeakReceiver;
+
+template <typename>
+struct BindUnwrapTraits;
+
+template <typename Functor, typename BoundArgsTuple, typename SFINAE = void>
+struct CallbackCancellationTraits;
+
+namespace internal {
+
+template <typename Functor, typename SFINAE = void>
+struct FunctorTraits;
+
+// Storage produced by base::Unretained(): holds a bare pointer with no
+// refcounting; the caller must guarantee |o| outlives the callback.
+template <typename T>
+class UnretainedWrapper {
+ public:
+  explicit UnretainedWrapper(T* o) : ptr_(o) {}
+  T* get() const { return ptr_; }
+
+ private:
+  T* ptr_;
+};
+
+// Storage produced by base::RetainedRef(): holds a scoped_refptr so the
+// object stays alive for the callback's lifetime, but unwraps to a raw
+// pointer.
+template <typename T>
+class RetainedRefWrapper {
+ public:
+  explicit RetainedRefWrapper(T* o) : ptr_(o) {}
+  explicit RetainedRefWrapper(scoped_refptr<T> o) : ptr_(std::move(o)) {}
+  T* get() const { return ptr_.get(); }
+
+ private:
+  scoped_refptr<T> ptr_;
+};
+
+// Storage produced by base::IgnoreResult(): tags |functor_| so that its
+// return value is discarded (see IgnoreResult() in base/bind.h).
+template <typename T>
+struct IgnoreResultHelper {
+  explicit IgnoreResultHelper(T functor) : functor_(std::move(functor)) {}
+  // True iff the wrapped functor itself is non-null.
+  explicit operator bool() const { return !!functor_; }
+
+  T functor_;
+};
+
+// Storage produced by base::Owned(): owns the object via unique_ptr (deleted
+// together with the callback), but unwraps to a raw pointer.
+template <typename T, typename Deleter = std::default_delete<T>>
+class OwnedWrapper {
+ public:
+  explicit OwnedWrapper(T* o) : ptr_(o) {}
+  explicit OwnedWrapper(std::unique_ptr<T, Deleter>&& ptr)
+      : ptr_(std::move(ptr)) {}
+  T* get() const { return ptr_.get(); }
+
+ private:
+  std::unique_ptr<T, Deleter> ptr_;
+};
+
+// PassedWrapper is a copyable adapter for a scoper that ignores const.
+//
+// It is needed to get around the fact that Bind() takes a const reference to
+// all its arguments. Because Bind() takes a const reference to avoid
+// unnecessary copies, it is incompatible with movable-but-not-copyable
+// types; doing a destructive "move" of the type into Bind() would violate
+// the const correctness.
+//
+// This conundrum cannot be solved without either C++11 rvalue references or
+// a O(2^n) blowup of Bind() templates to handle each combination of regular
+// types and movable-but-not-copyable types. Thus we introduce a wrapper type
+// that is copyable to transmit the correct type information down into
+// BindState<>. Ignoring const in this type makes sense because it is only
+// created when we are explicitly trying to do a destructive move.
+//
+// Two notes:
+//  1) PassedWrapper supports any type that has a move constructor, however
+//     the type will need to be specifically whitelisted in order for it to be
+//     bound to a Callback. We guard this explicitly at the call of Passed()
+//     to make for clear errors. Things not given to Passed() will be forwarded
+//     and stored by value which will not work for general move-only types.
+//  2) is_valid_ is distinct from NULL because it is valid to bind a "NULL"
+//     scoper to a Callback and allow the Callback to execute once.
+template <typename T>
+class PassedWrapper {
+ public:
+  explicit PassedWrapper(T&& scoper)
+      : is_valid_(true), scoper_(std::move(scoper)) {}
+  PassedWrapper(PassedWrapper&& other)
+      : is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
+  // Moves the scoper out. Valid at most once; a second call CHECK()s.
+  T Take() const {
+    CHECK(is_valid_);
+    is_valid_ = false;
+    return std::move(scoper_);
+  }
+
+ private:
+  // mutable: Take() is const (bound state is stored const for repeating
+  // callbacks) yet must invalidate the wrapper and move the scoper out.
+  mutable bool is_valid_;
+  mutable T scoper_;
+};
+
+template <typename T>
+using Unwrapper = BindUnwrapTraits<std::decay_t<T>>;
+
+// Unwraps a bound argument by dispatching to its BindUnwrapTraits
+// specialization (e.g. an UnretainedWrapper<T> unwraps to T*).
+template <typename T>
+decltype(auto) Unwrap(T&& o) {
+  return Unwrapper<T>::Unwrap(std::forward<T>(o));
+}
+
+// IsWeakMethod is a helper that determines if we are binding a WeakPtr<> to a
+// method. It is used internally by Bind() to select the correct
+// InvokeHelper that will no-op itself in the event the WeakPtr<> for
+// the target object is invalidated.
+//
+// The first argument should be the type of the object that will be received by
+// the method.
+template <bool is_method, typename... Args>
+struct IsWeakMethod : std::false_type {};
+
+template <typename T, typename... Args>
+struct IsWeakMethod<true, T, Args...> : IsWeakReceiver<T> {};
+
+// Packs a list of types to hold them in a single type.
+template <typename... Types>
+struct TypeList {};
+
+// Used for DropTypeListItem implementation.
+template <size_t n, typename List>
+struct DropTypeListItemImpl;
+
+// Do not use enable_if and SFINAE here to avoid MSVC2013 compile failure.
+template <size_t n, typename T, typename... List>
+struct DropTypeListItemImpl<n, TypeList<T, List...>>
+    : DropTypeListItemImpl<n - 1, TypeList<List...>> {};
+
+template <typename T, typename... List>
+struct DropTypeListItemImpl<0, TypeList<T, List...>> {
+  using Type = TypeList<T, List...>;
+};
+
+template <>
+struct DropTypeListItemImpl<0, TypeList<>> {
+  using Type = TypeList<>;
+};
+
+// A type-level function that drops the first |n| items from the given
+// TypeList.
+template <size_t n, typename List>
+using DropTypeListItem = typename DropTypeListItemImpl<n, List>::Type;
+
+// Used for TakeTypeListItem implementation.
+template <size_t n, typename List, typename... Accum>
+struct TakeTypeListItemImpl;
+
+// Do not use enable_if and SFINAE here to avoid MSVC2013 compile failure.
+template <size_t n, typename T, typename... List, typename... Accum>
+struct TakeTypeListItemImpl<n, TypeList<T, List...>, Accum...>
+    : TakeTypeListItemImpl<n - 1, TypeList<List...>, Accum..., T> {};
+
+template <typename T, typename... List, typename... Accum>
+struct TakeTypeListItemImpl<0, TypeList<T, List...>, Accum...> {
+  using Type = TypeList<Accum...>;
+};
+
+template <typename... Accum>
+struct TakeTypeListItemImpl<0, TypeList<>, Accum...> {
+  using Type = TypeList<Accum...>;
+};
+
+// A type-level function that takes the first |n| items from the given
+// TypeList. E.g. TakeTypeListItem<3, TypeList<A, B, C, D>> is evaluated to
+// TypeList<A, B, C>.
+template <size_t n, typename List>
+using TakeTypeListItem = typename TakeTypeListItemImpl<n, List>::Type;
+
+// Used for ConcatTypeLists implementation.
+template <typename List1, typename List2>
+struct ConcatTypeListsImpl;
+
+template <typename... Types1, typename... Types2>
+struct ConcatTypeListsImpl<TypeList<Types1...>, TypeList<Types2...>> {
+  using Type = TypeList<Types1..., Types2...>;
+};
+
+// A type-level function that concats two TypeLists.
+template <typename List1, typename List2>
+using ConcatTypeLists = typename ConcatTypeListsImpl<List1, List2>::Type;
+
+// Used for MakeFunctionType implementation.
+template <typename R, typename ArgList>
+struct MakeFunctionTypeImpl;
+
+template <typename R, typename... Args>
+struct MakeFunctionTypeImpl<R, TypeList<Args...>> {
+  // MSVC 2013 doesn't support Type Alias of function types.
+  // Revisit this after we update it to newer version.
+  typedef R Type(Args...);
+};
+
+// A type-level function that constructs a function type that has |R| as its
+// return type and has TypeLists items as its arguments.
+template <typename R, typename ArgList>
+using MakeFunctionType = typename MakeFunctionTypeImpl<R, ArgList>::Type;
+
+// Used for ExtractArgs and ExtractReturnType.
+template <typename Signature>
+struct ExtractArgsImpl;
+
+template <typename R, typename... Args>
+struct ExtractArgsImpl<R(Args...)> {
+  using ReturnType = R;
+  using ArgsList = TypeList<Args...>;
+};
+
+// A type-level function that extracts function arguments into a TypeList.
+// E.g. ExtractArgs<R(A, B, C)> is evaluated to TypeList<A, B, C>.
+template <typename Signature>
+using ExtractArgs = typename ExtractArgsImpl<Signature>::ArgsList;
+
+// A type-level function that extracts the return type of a function.
+// E.g. ExtractReturnType<R(A, B, C)> is evaluated to R.
+template <typename Signature>
+using ExtractReturnType = typename ExtractArgsImpl<Signature>::ReturnType;
+
+template <typename Callable,
+          typename Signature = decltype(&Callable::operator())>
+struct ExtractCallableRunTypeImpl;
+
+template <typename Callable, typename R, typename... Args>
+struct ExtractCallableRunTypeImpl<Callable, R (Callable::*)(Args...)> {
+  using Type = R(Args...);
+};
+
+// Handles const-qualified operator(), e.g. the call operator of a
+// non-mutable lambda.
+template <typename Callable, typename R, typename... Args>
+struct ExtractCallableRunTypeImpl<Callable, R (Callable::*)(Args...) const> {
+  using Type = R(Args...);
+};
+
+// Evaluated to RunType of the given callable type.
+// Example:
+//   auto f = [](int, char*) { return 0.1; };
+//   ExtractCallableRunType<decltype(f)>
+//   is evaluated to
+//   double(int, char*);
+template <typename Callable>
+using ExtractCallableRunType =
+    typename ExtractCallableRunTypeImpl<Callable>::Type;
+
+// IsCallableObject<Functor> is std::true_type if |Functor| has operator().
+// Otherwise, it's std::false_type.
+// Example:
+//   IsCallableObject<void(*)()>::value is false.
+//
+//   struct Foo {};
+//   IsCallableObject<void(Foo::*)()>::value is false.
+//
+//   int i = 0;
+//   auto f = [i]() {};
+//   IsCallableObject<decltype(f)>::value is true.
+//   (A capturing lambda still has operator(); it is rejected later by
+//   FunctorTraits, which additionally requires the functor to be empty.)
+template <typename Functor, typename SFINAE = void>
+struct IsCallableObject : std::false_type {};
+
+template <typename Callable>
+struct IsCallableObject<Callable, void_t<decltype(&Callable::operator())>>
+    : std::true_type {};
+
+// HasRefCountedTypeAsRawPtr selects true_type when any of the |Args| is a raw
+// pointer to a RefCounted type.
+// Implementation note: This non-specialized case handles zero-arity case only.
+// Non-zero-arity cases should be handled by the specialization below.
+template <typename... Args>
+struct HasRefCountedTypeAsRawPtr : std::false_type {};
+
+// Implementation note: Select true_type if the first parameter is a raw pointer
+// to a RefCounted type. Otherwise, skip the first parameter and check rest of
+// parameters recursively.
+template <typename T, typename... Args>
+struct HasRefCountedTypeAsRawPtr<T, Args...>
+ : std::conditional_t<NeedsScopedRefptrButGetsRawPtr<T>::value,
+ std::true_type,
+ HasRefCountedTypeAsRawPtr<Args...>> {};
+
+// ForceVoidReturn<>
+//
+// Set of templates that support forcing the function return type to void.
+template <typename Sig>
+struct ForceVoidReturn;
+
+template <typename R, typename... Args>
+struct ForceVoidReturn<R(Args...)> {
+ using RunType = void(Args...);
+};
+
+// FunctorTraits<>
+//
+// See description at top of file.
+template <typename Functor, typename SFINAE>
+struct FunctorTraits;
+
+// For empty callable types.
+// This specialization is intended to allow binding captureless lambdas, based
+// on the fact that captureless lambdas are empty while capturing lambdas are
+// not. This also allows any functors as far as it's an empty class.
+// Example:
+//
+//   // Captureless lambdas are allowed.
+//   []() {return 42;};
+//
+//   // Capturing lambdas are *not* allowed.
+//   int x;
+//   [x]() {return x;};
+//
+//   // Any empty class with operator() is allowed.
+//   struct Foo {
+//     void operator()() const {}
+//     // No non-static member variable and no virtual functions.
+//   };
+template <typename Functor>
+struct FunctorTraits<Functor,
+                     std::enable_if_t<IsCallableObject<Functor>::value &&
+                                      std::is_empty<Functor>::value>> {
+  using RunType = ExtractCallableRunType<Functor>;
+  static constexpr bool is_method = false;
+  // Empty callables cannot be null, so IsNull() below never has to test them.
+  static constexpr bool is_nullable = false;
+
+  template <typename RunFunctor, typename... RunArgs>
+  static ExtractReturnType<RunType> Invoke(RunFunctor&& functor,
+                                           RunArgs&&... args) {
+    return std::forward<RunFunctor>(functor)(std::forward<RunArgs>(args)...);
+  }
+};
+
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R (*)(Args...)> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  // Function pointers may be null, so IsNull() must test them before Run().
+  static constexpr bool is_nullable = true;
+
+  template <typename Function, typename... RunArgs>
+  static R Invoke(Function&& function, RunArgs&&... args) {
+    return std::forward<Function>(function)(std::forward<RunArgs>(args)...);
+  }
+};
+
+#if defined(OS_WIN) && !defined(ARCH_CPU_64_BITS)
+
+// For functions using the __stdcall calling convention. Only needed on 32-bit
+// Windows, where __stdcall pointers are a distinct function-pointer type.
+template <typename R, typename... Args>
+struct FunctorTraits<R(__stdcall*)(Args...)> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename... RunArgs>
+  static R Invoke(R(__stdcall* function)(Args...), RunArgs&&... args) {
+    return function(std::forward<RunArgs>(args)...);
+  }
+};
+
+// For functions using the __fastcall calling convention (32-bit Windows only,
+// as above).
+template <typename R, typename... Args>
+struct FunctorTraits<R(__fastcall*)(Args...)> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename... RunArgs>
+  static R Invoke(R(__fastcall* function)(Args...), RunArgs&&... args) {
+    return function(std::forward<RunArgs>(args)...);
+  }
+};
+
+#endif  // defined(OS_WIN) && !defined(ARCH_CPU_64_BITS)
+
+#if defined(OS_MACOSX)
+
+// Support for Objective-C blocks. There are two implementation depending
+// on whether Automated Reference Counting (ARC) is enabled. When ARC is
+// enabled, then the block itself can be bound as the compiler will ensure
+// its lifetime will be correctly managed. Otherwise, require the block to
+// be wrapped in a base::mac::ScopedBlock (via base::RetainBlock) that will
+// correctly manage the block lifetime.
+//
+// The two implementation ensure that the One Definition Rule (ODR) is not
+// broken (it is not possible to write a template base::RetainBlock that would
+// work correctly both with ARC enabled and disabled).
+
+#if HAS_FEATURE(objc_arc)
+
+template <typename R, typename... Args>
+struct FunctorTraits<R (^)(Args...)> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename BlockType, typename... RunArgs>
+  static R Invoke(BlockType&& block, RunArgs&&... args) {
+    // According to LLVM documentation (§ 6.3), "local variables of automatic
+    // storage duration do not have precise lifetime." Use objc_precise_lifetime
+    // to ensure that the Objective-C block is not deallocated until it has
+    // finished executing even if the Callback<> is destroyed during the block
+    // execution.
+    // https://clang.llvm.org/docs/AutomaticReferenceCounting.html#precise-lifetime-semantics
+    __attribute__((objc_precise_lifetime)) R (^scoped_block)(Args...) = block;
+    return scoped_block(std::forward<RunArgs>(args)...);
+  }
+};
+
+#else  // HAS_FEATURE(objc_arc)
+
+template <typename R, typename... Args>
+struct FunctorTraits<base::mac::ScopedBlock<R (^)(Args...)>> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename BlockType, typename... RunArgs>
+  static R Invoke(BlockType&& block, RunArgs&&... args) {
+    // Copy the block to ensure that the Objective-C block is not deallocated
+    // until it has finished executing even if the Callback<> is destroyed
+    // during the block execution.
+    base::mac::ScopedBlock<R (^)(Args...)> scoped_block(block);
+    return scoped_block.get()(std::forward<RunArgs>(args)...);
+  }
+};
+
+#endif  // HAS_FEATURE(objc_arc)
+#endif  // defined(OS_MACOSX)
+
+// For methods.
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...)> {
+  // The receiver pointer becomes the implied first argument of the RunType.
+  using RunType = R(Receiver*, Args...);
+  static constexpr bool is_method = true;
+  static constexpr bool is_nullable = true;
+
+  template <typename Method, typename ReceiverPtr, typename... RunArgs>
+  static R Invoke(Method method,
+                  ReceiverPtr&& receiver_ptr,
+                  RunArgs&&... args) {
+    return ((*receiver_ptr).*method)(std::forward<RunArgs>(args)...);
+  }
+};
+
+// For const methods. Identical to the above except that a const receiver
+// pointer is accepted.
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...) const> {
+  using RunType = R(const Receiver*, Args...);
+  static constexpr bool is_method = true;
+  static constexpr bool is_nullable = true;
+
+  template <typename Method, typename ReceiverPtr, typename... RunArgs>
+  static R Invoke(Method method,
+                  ReceiverPtr&& receiver_ptr,
+                  RunArgs&&... args) {
+    return ((*receiver_ptr).*method)(std::forward<RunArgs>(args)...);
+  }
+};
+
+#ifdef __cpp_noexcept_function_type
+// noexcept makes a distinct function type in C++17.
+// I.e. `void(*)()` and `void(*)() noexcept` are same in pre-C++17, and
+// different in C++17. These forward to the non-noexcept traits above.
+template <typename R, typename... Args>
+struct FunctorTraits<R (*)(Args...) noexcept> : FunctorTraits<R (*)(Args...)> {
+};
+
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...) noexcept>
+    : FunctorTraits<R (Receiver::*)(Args...)> {};
+
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...) const noexcept>
+    : FunctorTraits<R (Receiver::*)(Args...) const> {};
+#endif
+
+// For IgnoreResults. Inherits the wrapped functor's traits but forces the
+// RunType's return type to void and unwraps |functor_| before invoking.
+template <typename T>
+struct FunctorTraits<IgnoreResultHelper<T>> : FunctorTraits<T> {
+  using RunType =
+      typename ForceVoidReturn<typename FunctorTraits<T>::RunType>::RunType;
+
+  template <typename IgnoreResultType, typename... RunArgs>
+  static void Invoke(IgnoreResultType&& ignore_result_helper,
+                     RunArgs&&... args) {
+    FunctorTraits<T>::Invoke(
+        std::forward<IgnoreResultType>(ignore_result_helper).functor_,
+        std::forward<RunArgs>(args)...);
+  }
+};
+
+// For OnceCallbacks. Allows nesting a callback inside another bind; the inner
+// callback is invoked via its Run() method.
+template <typename R, typename... Args>
+struct FunctorTraits<OnceCallback<R(Args...)>> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename CallbackType, typename... RunArgs>
+  static R Invoke(CallbackType&& callback, RunArgs&&... args) {
+    DCHECK(!callback.is_null());
+    return std::forward<CallbackType>(callback).Run(
+        std::forward<RunArgs>(args)...);
+  }
+};
+
+// For RepeatingCallbacks. Same shape as the OnceCallback traits above.
+template <typename R, typename... Args>
+struct FunctorTraits<RepeatingCallback<R(Args...)>> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename CallbackType, typename... RunArgs>
+  static R Invoke(CallbackType&& callback, RunArgs&&... args) {
+    DCHECK(!callback.is_null());
+    return std::forward<CallbackType>(callback).Run(
+        std::forward<RunArgs>(args)...);
+  }
+};
+
+// Convenience alias that strips references/cv-qualifiers from |Functor|
+// before looking up its traits.
+template <typename Functor>
+using MakeFunctorTraits = FunctorTraits<std::decay_t<Functor>>;
+
+// InvokeHelper<>
+//
+// There are 2 logical InvokeHelper<> specializations: normal, WeakCalls.
+//
+// The normal type just calls the underlying runnable.
+//
+// WeakCalls need special syntax that is applied to the first argument to check
+// if they should no-op themselves.
+template <bool is_weak_call, typename ReturnType>
+struct InvokeHelper;
+
+// Normal (non-weak) call: forwards straight to FunctorTraits<>::Invoke().
+template <typename ReturnType>
+struct InvokeHelper<false, ReturnType> {
+  template <typename Functor, typename... RunArgs>
+  static inline ReturnType MakeItSo(Functor&& functor, RunArgs&&... args) {
+    using Traits = MakeFunctorTraits<Functor>;
+    return Traits::Invoke(std::forward<Functor>(functor),
+                          std::forward<RunArgs>(args)...);
+  }
+};
+
+// Weak call: the bound weak pointer is tested first, and an invalidated
+// pointer silently turns the invocation into a no-op.
+template <typename ReturnType>
+struct InvokeHelper<true, ReturnType> {
+  // WeakCalls are only supported for functions with a void return type.
+  // Otherwise, the function result would be undefined if the WeakPtr<>
+  // is invalidated.
+  static_assert(std::is_void<ReturnType>::value,
+                "weak_ptrs can only bind to methods without return values");
+
+  template <typename Functor, typename BoundWeakPtr, typename... RunArgs>
+  static inline void MakeItSo(Functor&& functor,
+                              BoundWeakPtr&& weak_ptr,
+                              RunArgs&&... args) {
+    if (!weak_ptr)
+      return;
+    using Traits = MakeFunctorTraits<Functor>;
+    Traits::Invoke(std::forward<Functor>(functor),
+                   std::forward<BoundWeakPtr>(weak_ptr),
+                   std::forward<RunArgs>(args)...);
+  }
+};
+
+// Invoker<>
+//
+// See description at the top of the file.
+template <typename StorageType, typename UnboundRunType>
+struct Invoker;
+
+template <typename StorageType, typename R, typename... UnboundArgs>
+struct Invoker<StorageType, R(UnboundArgs...)> {
+  // Single-shot entry point: moves the stored functor and bound arguments
+  // out of |base|, so the BindState must not be run again afterwards.
+  static R RunOnce(BindStateBase* base,
+                   PassingType<UnboundArgs>... unbound_args) {
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    StorageType* storage = static_cast<StorageType*>(base);
+    static constexpr size_t num_bound_args =
+        std::tuple_size<decltype(storage->bound_args_)>::value;
+    return RunImpl(std::move(storage->functor_),
+                   std::move(storage->bound_args_),
+                   std::make_index_sequence<num_bound_args>(),
+                   std::forward<UnboundArgs>(unbound_args)...);
+  }
+
+  // Repeatable entry point: reads the stored state through a const pointer,
+  // leaving it intact for subsequent invocations.
+  static R Run(BindStateBase* base, PassingType<UnboundArgs>... unbound_args) {
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    const StorageType* storage = static_cast<StorageType*>(base);
+    static constexpr size_t num_bound_args =
+        std::tuple_size<decltype(storage->bound_args_)>::value;
+    return RunImpl(storage->functor_, storage->bound_args_,
+                   std::make_index_sequence<num_bound_args>(),
+                   std::forward<UnboundArgs>(unbound_args)...);
+  }
+
+ private:
+  // Expands the bound-arguments tuple via |indices|, unwraps each bound
+  // argument, and dispatches to the weak or normal InvokeHelper.
+  template <typename Functor, typename BoundArgsTuple, size_t... indices>
+  static inline R RunImpl(Functor&& functor,
+                          BoundArgsTuple&& bound,
+                          std::index_sequence<indices...>,
+                          UnboundArgs&&... unbound_args) {
+    static constexpr bool is_method = MakeFunctorTraits<Functor>::is_method;
+
+    using DecayedArgsTuple = std::decay_t<BoundArgsTuple>;
+    // A weak call is a method invocation whose first bound argument is a
+    // weak receiver (see IsWeakMethod).
+    static constexpr bool is_weak_call =
+        IsWeakMethod<is_method,
+                     std::tuple_element_t<indices, DecayedArgsTuple>...>();
+
+    return InvokeHelper<is_weak_call, R>::MakeItSo(
+        std::forward<Functor>(functor),
+        Unwrap(std::get<indices>(std::forward<BoundArgsTuple>(bound)))...,
+        std::forward<UnboundArgs>(unbound_args)...);
+  }
+};
+
+// Extracts necessary type info from Functor and BoundArgs.
+// Used to implement MakeUnboundRunType, BindOnce and BindRepeating.
+template <typename Functor, typename... BoundArgs>
+struct BindTypeHelper {
+  static constexpr size_t num_bounds = sizeof...(BoundArgs);
+  using FunctorTraits = MakeFunctorTraits<Functor>;
+
+  // Example:
+  //   When Functor is `double (Foo::*)(int, const std::string&)`, and BoundArgs
+  //   is a template pack of `Foo*` and `int16_t`:
+  //    - RunType is `double(Foo*, int, const std::string&)`,
+  //    - ReturnType is `double`,
+  //    - RunParamsList is `TypeList<Foo*, int, const std::string&>`,
+  //    - BoundParamsList is `TypeList<Foo*, int>`,
+  //    - UnboundParamsList is `TypeList<const std::string&>`,
+  //    - BoundArgsList is `TypeList<Foo*, int16_t>`,
+  //    - UnboundRunType is `double(const std::string&)`.
+  using RunType = typename FunctorTraits::RunType;
+  using ReturnType = ExtractReturnType<RunType>;
+
+  using RunParamsList = ExtractArgs<RunType>;
+  using BoundParamsList = TakeTypeListItem<num_bounds, RunParamsList>;
+  using UnboundParamsList = DropTypeListItem<num_bounds, RunParamsList>;
+
+  using BoundArgsList = TypeList<BoundArgs...>;
+
+  using UnboundRunType = MakeFunctionType<ReturnType, UnboundParamsList>;
+};
+
+// Overload chosen when FunctorTraits reports the functor can be null
+// (function pointers, callbacks); actually tests the functor.
+template <typename Functor>
+std::enable_if_t<FunctorTraits<Functor>::is_nullable, bool> IsNull(
+    const Functor& functor) {
+  return !functor;
+}
+
+// Overload for functors that can never be null (e.g. captureless lambdas).
+template <typename Functor>
+std::enable_if_t<!FunctorTraits<Functor>::is_nullable, bool> IsNull(
+    const Functor&) {
+  return false;
+}
+
+// Used by QueryCancellationTraits below. Expands the bound-arguments tuple
+// via |indices| and forwards the query to the functor's
+// CallbackCancellationTraits<>.
+template <typename Functor, typename BoundArgsTuple, size_t... indices>
+bool QueryCancellationTraitsImpl(BindStateBase::CancellationQueryMode mode,
+                                 const Functor& functor,
+                                 const BoundArgsTuple& bound_args,
+                                 std::index_sequence<indices...>) {
+  switch (mode) {
+    case BindStateBase::IS_CANCELLED:
+      return CallbackCancellationTraits<Functor, BoundArgsTuple>::IsCancelled(
+          functor, std::get<indices>(bound_args)...);
+    case BindStateBase::MAYBE_VALID:
+      return CallbackCancellationTraits<Functor, BoundArgsTuple>::MaybeValid(
+          functor, std::get<indices>(bound_args)...);
+  }
+  // NOTREACHED() compiles to a no-op in release builds, so without an
+  // explicit return control would flow off the end of a value-returning
+  // function (undefined behavior; MSVC warning C4715).
+  NOTREACHED();
+  return false;
+}
+
+// Relays |base| to the corresponding CallbackCancellationTraits<>. Returns
+// true if the callback that |base| represents is cancelled (or maybe-valid,
+// depending on |mode|).
+template <typename BindStateType>
+bool QueryCancellationTraits(const BindStateBase* base,
+                             BindStateBase::CancellationQueryMode mode) {
+  const BindStateType* storage = static_cast<const BindStateType*>(base);
+  static constexpr size_t num_bound_args =
+      std::tuple_size<decltype(storage->bound_args_)>::value;
+  return QueryCancellationTraitsImpl(
+      mode, storage->functor_, storage->bound_args_,
+      std::make_index_sequence<num_bound_args>());
+}
+
+// The base case of BanUnconstructedRefCountedReceiver that checks nothing.
+// Selected for free functions and for receivers that are not raw pointers to
+// ref-counted types.
+template <typename Functor, typename Receiver, typename... Unused>
+std::enable_if_t<
+    !(MakeFunctorTraits<Functor>::is_method &&
+      std::is_pointer<std::decay_t<Receiver>>::value &&
+      IsRefCountedType<std::remove_pointer_t<std::decay_t<Receiver>>>::value)>
+BanUnconstructedRefCountedReceiver(const Receiver& receiver, Unused&&...) {}
+
+// Overload for binds with no bound arguments at all (nothing to check).
+template <typename Functor>
+void BanUnconstructedRefCountedReceiver() {}
+
+// Asserts that Callback is not the first owner of a ref-counted receiver.
+template <typename Functor, typename Receiver, typename... Unused>
+std::enable_if_t<
+    MakeFunctorTraits<Functor>::is_method &&
+    std::is_pointer<std::decay_t<Receiver>>::value &&
+    IsRefCountedType<std::remove_pointer_t<std::decay_t<Receiver>>>::value>
+BanUnconstructedRefCountedReceiver(const Receiver& receiver, Unused&&...) {
+  DCHECK(receiver);
+
+  // It's error prone to make the implicit first reference to ref-counted types.
+  // In the example below, base::BindOnce() makes the implicit first reference
+  // to the ref-counted Foo. If PostTask() failed or the posted task ran fast
+  // enough, the newly created instance can be destroyed before |oo| makes
+  // another reference.
+  //   Foo::Foo() {
+  //     base::PostTask(FROM_HERE, base::BindOnce(&Foo::Bar, this));
+  //   }
+  //
+  //   scoped_refptr<Foo> oo = new Foo();
+  //
+  // Instead of doing like above, please consider adding a static constructor,
+  // and keep the first reference alive explicitly.
+  //   // static
+  //   scoped_refptr<Foo> Foo::Create() {
+  //     auto foo = base::WrapRefCounted(new Foo());
+  //     base::PostTask(FROM_HERE, base::BindOnce(&Foo::Bar, foo));
+  //     return foo;
+  //   }
+  //
+  //   Foo::Foo() {}
+  //
+  //   scoped_refptr<Foo> oo = Foo::Create();
+  DCHECK(receiver->HasAtLeastOneRef())
+      << "base::Bind{Once,Repeating}() refuses to create the first reference "
+         "to ref-counted objects. That typically happens around PostTask() in "
+         "their constructor, and such objects can be destroyed before `new` "
+         "returns if the task resolves fast enough.";
+}
+
+// BindState<>
+//
+// This stores all the state passed into Bind().
+template <typename Functor, typename... BoundArgs>
+struct BindState final : BindStateBase {
+  using IsCancellable = std::integral_constant<
+      bool,
+      CallbackCancellationTraits<Functor,
+                                 std::tuple<BoundArgs...>>::is_cancellable>;
+
+  // Factory used by Bind{Once,Repeating}(). Validates the receiver and picks
+  // the cancellable or non-cancellable constructor at compile time.
+  template <typename ForwardFunctor, typename... ForwardBoundArgs>
+  static BindState* Create(BindStateBase::InvokeFuncStorage invoke_func,
+                           ForwardFunctor&& functor,
+                           ForwardBoundArgs&&... bound_args) {
+    // Ban ref counted receivers that were not yet fully constructed to avoid
+    // a common pattern of racy situation.
+    BanUnconstructedRefCountedReceiver<ForwardFunctor>(bound_args...);
+
+    // IsCancellable is std::false_type if
+    // CallbackCancellationTraits<>::IsCancelled returns always false.
+    // Otherwise, it's std::true_type.
+    return new BindState(IsCancellable{}, invoke_func,
+                         std::forward<ForwardFunctor>(functor),
+                         std::forward<ForwardBoundArgs>(bound_args)...);
+  }
+
+  Functor functor_;
+  std::tuple<BoundArgs...> bound_args_;
+
+ private:
+  // Cancellable variant: registers QueryCancellationTraits so the callback
+  // can later be asked IS_CANCELLED / MAYBE_VALID.
+  template <typename ForwardFunctor, typename... ForwardBoundArgs>
+  explicit BindState(std::true_type,
+                     BindStateBase::InvokeFuncStorage invoke_func,
+                     ForwardFunctor&& functor,
+                     ForwardBoundArgs&&... bound_args)
+      : BindStateBase(invoke_func,
+                      &Destroy,
+                      &QueryCancellationTraits<BindState>),
+        functor_(std::forward<ForwardFunctor>(functor)),
+        bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
+    DCHECK(!IsNull(functor_));
+  }
+
+  // Non-cancellable variant: no cancellation hook is installed.
+  template <typename ForwardFunctor, typename... ForwardBoundArgs>
+  explicit BindState(std::false_type,
+                     BindStateBase::InvokeFuncStorage invoke_func,
+                     ForwardFunctor&& functor,
+                     ForwardBoundArgs&&... bound_args)
+      : BindStateBase(invoke_func, &Destroy),
+        functor_(std::forward<ForwardFunctor>(functor)),
+        bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
+    DCHECK(!IsNull(functor_));
+  }
+
+  // Private: instances are destroyed only through Destroy() below.
+  ~BindState() = default;
+
+  // Destructor callback handed to BindStateBase.
+  static void Destroy(const BindStateBase* self) {
+    delete static_cast<const BindState*>(self);
+  }
+};
+
+// Used to implement MakeBindStateType.
+template <bool is_method, typename Functor, typename... BoundArgs>
+struct MakeBindStateTypeImpl;
+
+// Non-method functors: all bound arguments are simply decayed.
+template <typename Functor, typename... BoundArgs>
+struct MakeBindStateTypeImpl<false, Functor, BoundArgs...> {
+  static_assert(!HasRefCountedTypeAsRawPtr<std::decay_t<BoundArgs>...>::value,
+                "A parameter is a refcounted type and needs scoped_refptr.");
+  using Type = BindState<std::decay_t<Functor>, std::decay_t<BoundArgs>...>;
+};
+
+// Methods with no bound receiver (and no other bound arguments).
+template <typename Functor>
+struct MakeBindStateTypeImpl<true, Functor> {
+  using Type = BindState<std::decay_t<Functor>>;
+};
+
+// Methods with a bound receiver. A raw pointer to a ref-counted receiver is
+// stored as a scoped_refptr so the callback keeps the receiver alive.
+template <typename Functor, typename Receiver, typename... BoundArgs>
+struct MakeBindStateTypeImpl<true, Functor, Receiver, BoundArgs...> {
+ private:
+  using DecayedReceiver = std::decay_t<Receiver>;
+
+  static_assert(!std::is_array<std::remove_reference_t<Receiver>>::value,
+                "First bound argument to a method cannot be an array.");
+  static_assert(
+      !std::is_pointer<DecayedReceiver>::value ||
+          IsRefCountedType<std::remove_pointer_t<DecayedReceiver>>::value,
+      "Receivers may not be raw pointers. If using a raw pointer here is safe"
+      " and has no lifetime concerns, use base::Unretained() and document why"
+      " it's safe.");
+  static_assert(!HasRefCountedTypeAsRawPtr<std::decay_t<BoundArgs>...>::value,
+                "A parameter is a refcounted type and needs scoped_refptr.");
+
+ public:
+  using Type = BindState<
+      std::decay_t<Functor>,
+      std::conditional_t<std::is_pointer<DecayedReceiver>::value,
+                         scoped_refptr<std::remove_pointer_t<DecayedReceiver>>,
+                         DecayedReceiver>,
+      std::decay_t<BoundArgs>...>;
+};
+
+// Computes the BindState instantiation for a given functor and bound args.
+template <typename Functor, typename... BoundArgs>
+using MakeBindStateType =
+    typename MakeBindStateTypeImpl<MakeFunctorTraits<Functor>::is_method,
+                                   Functor,
+                                   BoundArgs...>::Type;
+
+} // namespace internal
+
+// An injection point to control |this| pointer behavior on a method invocation.
+// If IsWeakReceiver<> is true_type for |T| and |T| is used for a receiver of a
+// method, base::Bind cancels the method invocation if the receiver is tested as
+// false.
+// E.g. Foo::bar() is not called:
+//   struct Foo : base::SupportsWeakPtr<Foo> {
+//     void bar() {}
+//   };
+//
+//   WeakPtr<Foo> oo = nullptr;
+//   base::BindOnce(&Foo::bar, oo).Run();
+template <typename T>
+struct IsWeakReceiver : std::false_type {};
+
+// A reference_wrapper is weak iff the wrapped type is weak.
+template <typename T>
+struct IsWeakReceiver<std::reference_wrapper<T>> : IsWeakReceiver<T> {};
+
+template <typename T>
+struct IsWeakReceiver<WeakPtr<T>> : std::true_type {};
+
+// An injection point to control how bound objects passed to the target
+// function. BindUnwrapTraits<>::Unwrap() is called for each bound objects right
+// before the target function is invoked.
+// The default implementation forwards the bound object unchanged.
+template <typename>
+struct BindUnwrapTraits {
+  template <typename T>
+  static T&& Unwrap(T&& o) {
+    return std::forward<T>(o);
+  }
+};
+
+// Unretained(ptr) unwraps back to the raw pointer.
+template <typename T>
+struct BindUnwrapTraits<internal::UnretainedWrapper<T>> {
+  static T* Unwrap(const internal::UnretainedWrapper<T>& o) { return o.get(); }
+};
+
+// std::ref()/std::cref() unwrap to the underlying reference.
+template <typename T>
+struct BindUnwrapTraits<std::reference_wrapper<T>> {
+  static T& Unwrap(std::reference_wrapper<T> o) { return o.get(); }
+};
+
+template <typename T>
+struct BindUnwrapTraits<internal::RetainedRefWrapper<T>> {
+  static T* Unwrap(const internal::RetainedRefWrapper<T>& o) { return o.get(); }
+};
+
+template <typename T, typename Deleter>
+struct BindUnwrapTraits<internal::OwnedWrapper<T, Deleter>> {
+  static T* Unwrap(const internal::OwnedWrapper<T, Deleter>& o) {
+    return o.get();
+  }
+};
+
+// Passed() hands the stored value over by value via Take().
+template <typename T>
+struct BindUnwrapTraits<internal::PassedWrapper<T>> {
+  static T Unwrap(const internal::PassedWrapper<T>& o) { return o.Take(); }
+};
+
+#if defined(OS_WIN)
+template <typename T>
+struct BindUnwrapTraits<Microsoft::WRL::ComPtr<T>> {
+  static T* Unwrap(const Microsoft::WRL::ComPtr<T>& ptr) { return ptr.Get(); }
+};
+#endif
+
+// CallbackCancellationTraits allows customization of Callback's cancellation
+// semantics. By default, callbacks are not cancellable. A specialization should
+// set is_cancellable = true and implement an IsCancelled() that returns if the
+// callback should be cancelled.
+template <typename Functor, typename BoundArgsTuple, typename SFINAE>
+struct CallbackCancellationTraits {
+  static constexpr bool is_cancellable = false;
+};
+
+// Specialization for method bound to weak pointer receiver: the callback is
+// cancelled exactly when the weak receiver has been invalidated.
+template <typename Functor, typename... BoundArgs>
+struct CallbackCancellationTraits<
+    Functor,
+    std::tuple<BoundArgs...>,
+    std::enable_if_t<
+        internal::IsWeakMethod<internal::FunctorTraits<Functor>::is_method,
+                               BoundArgs...>::value>> {
+  static constexpr bool is_cancellable = true;
+
+  template <typename Receiver, typename... Args>
+  static bool IsCancelled(const Functor&,
+                          const Receiver& receiver,
+                          const Args&...) {
+    return !receiver;
+  }
+
+  template <typename Receiver, typename... Args>
+  static bool MaybeValid(const Functor&,
+                         const Receiver& receiver,
+                         const Args&...) {
+    return receiver.MaybeValid();
+  }
+};
+
+// Specialization for a nested bind: delegate the query to the inner callback's
+// own IsCancelled()/MaybeValid().
+template <typename Signature, typename... BoundArgs>
+struct CallbackCancellationTraits<OnceCallback<Signature>,
+                                  std::tuple<BoundArgs...>> {
+  static constexpr bool is_cancellable = true;
+
+  template <typename Functor>
+  static bool IsCancelled(const Functor& functor, const BoundArgs&...) {
+    return functor.IsCancelled();
+  }
+
+  template <typename Functor>
+  static bool MaybeValid(const Functor& functor, const BoundArgs&...) {
+    return functor.MaybeValid();
+  }
+};
+
+// Same as above for a nested RepeatingCallback.
+template <typename Signature, typename... BoundArgs>
+struct CallbackCancellationTraits<RepeatingCallback<Signature>,
+                                  std::tuple<BoundArgs...>> {
+  static constexpr bool is_cancellable = true;
+
+  template <typename Functor>
+  static bool IsCancelled(const Functor& functor, const BoundArgs&...) {
+    return functor.IsCancelled();
+  }
+
+  template <typename Functor>
+  static bool MaybeValid(const Functor& functor, const BoundArgs&...) {
+    return functor.MaybeValid();
+  }
+};
+
+// Returns a RunType of bound functor.
+// E.g. MakeUnboundRunType<R(A, B, C), A, B> is evaluated to R(C).
+template <typename Functor, typename... BoundArgs>
+using MakeUnboundRunType =
+    typename internal::BindTypeHelper<Functor, BoundArgs...>::UnboundRunType;
+
+} // namespace base
+
+#endif // BASE_BIND_INTERNAL_H_
diff --git a/security/sandbox/chromium/base/bit_cast.h b/security/sandbox/chromium/base/bit_cast.h
new file mode 100644
index 0000000000..90dd925e86
--- /dev/null
+++ b/security/sandbox/chromium/base/bit_cast.h
@@ -0,0 +1,77 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIT_CAST_H_
+#define BASE_BIT_CAST_H_
+
+#include <string.h>
+#include <type_traits>
+
+#include "base/compiler_specific.h"
+#include "base/template_util.h"
+#include "build/build_config.h"
+
+// bit_cast<Dest,Source> is a template function that implements the equivalent
+// of "*reinterpret_cast<Dest*>(&source)". We need this in very low-level
+// functions like the protobuf library and fast math support.
+//
+// float f = 3.14159265358979;
+// int i = bit_cast<int32_t>(f);
+// // i = 0x40490fdb
+//
+// The classical address-casting method is:
+//
+// // WRONG
+// float f = 3.14159265358979; // WRONG
+// int i = * reinterpret_cast<int*>(&f); // WRONG
+//
+// The address-casting method actually produces undefined behavior according to
+// the ISO C++98 specification, section 3.10 ("basic.lval"), paragraph 15.
+// (This did not substantially change in C++11.) Roughly, this section says: if
+// an object in memory has one type, and a program accesses it with a different
+// type, then the result is undefined behavior for most values of "different
+// type".
+//
+// This is true for any cast syntax, either *(int*)&f or
+// *reinterpret_cast<int*>(&f). And it is particularly true for conversions
+// between integral lvalues and floating-point lvalues.
+//
+// The purpose of this paragraph is to allow optimizing compilers to assume that
+// expressions with different types refer to different memory. Compilers are
+// known to take advantage of this. So a non-conforming program quietly
+// produces wildly incorrect output.
+//
+// The problem is not the use of reinterpret_cast. The problem is type punning:
+// holding an object in memory of one type and reading its bits back using a
+// different type.
+//
+// The C++ standard is more subtle and complex than this, but that is the basic
+// idea.
+//
+// Anyways ...
+//
+// bit_cast<> calls memcpy() which is blessed by the standard, especially by the
+// example in section 3.9 . Also, of course, bit_cast<> wraps up the nasty
+// logic in one place.
+//
+// Fortunately memcpy() is very fast. In optimized mode, compilers replace
+// calls to memcpy() with inline object code when the size argument is a
+// compile-time constant. On a 32-bit system, memcpy(d,s,4) compiles to one
+// load and one store, and memcpy(d,s,8) compiles to two loads and two stores.
+
+// Performs the type pun through memcpy(), which the standard blesses for
+// trivially copyable types of equal size (see the discussion above).
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  static_assert(sizeof(Dest) == sizeof(Source),
+                "bit_cast requires source and destination to be the same size");
+  static_assert(base::is_trivially_copyable<Dest>::value,
+                "bit_cast requires the destination type to be copyable");
+  static_assert(base::is_trivially_copyable<Source>::value,
+                "bit_cast requires the source type to be copyable");
+
+  // memcpy() into an uninitialized Dest; the optimizer collapses this to a
+  // plain register move for small sizes.
+  Dest result;
+  memcpy(&result, &source, sizeof(result));
+  return result;
+}
+
+#endif // BASE_BIT_CAST_H_
diff --git a/security/sandbox/chromium/base/bits.h b/security/sandbox/chromium/base/bits.h
new file mode 100644
index 0000000000..d2c5ac9caa
--- /dev/null
+++ b/security/sandbox/chromium/base/bits.h
@@ -0,0 +1,209 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines some bit utilities.
+
+#ifndef BASE_BITS_H_
+#define BASE_BITS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <type_traits>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#endif
+
+namespace base {
+namespace bits {
+
+// Returns true iff |value| is a power of 2.
+template <typename T,
+ typename = typename std::enable_if<std::is_integral<T>::value>>
+constexpr inline bool IsPowerOfTwo(T value) {
+ // From "Hacker's Delight": Section 2.1 Manipulating Rightmost Bits.
+ //
+ // Only positive integers with a single bit set are powers of two. If only one
+ // bit is set in x (e.g. 0b00000100000000) then |x-1| will have that bit set
+ // to zero and all bits to its right set to 1 (e.g. 0b00000011111111). Hence
+ // |x & (x-1)| is 0 iff x is a power of two.
+ return value > 0 && (value & (value - 1)) == 0;
+}
+
+// Round up |size| to a multiple of alignment, which must be a power of two.
+inline size_t Align(size_t size, size_t alignment) {
+ DCHECK(IsPowerOfTwo(alignment));
+ return (size + alignment - 1) & ~(alignment - 1);
+}
+
+// Round down |size| to a multiple of alignment, which must be a power of two.
+inline size_t AlignDown(size_t size, size_t alignment) {
+ DCHECK(IsPowerOfTwo(alignment));
+ return size & ~(alignment - 1);
+}
+
+// CountLeadingZeroBits(value) returns the number of zero bits following the
+// most significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns {sizeof(T) * 8}.
+// Example: 00100010 -> 2
+//
+// CountTrailingZeroBits(value) returns the number of zero bits preceding the
+// least significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns {sizeof(T) * 8}.
+// Example: 00100010 -> 1
+//
+// C does not have an operator to do this, but fortunately the various
+// compilers have built-ins that map to fast underlying processor instructions.
+#if defined(COMPILER_MSVC)
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
+ unsigned>::type
+ CountLeadingZeroBits(T x) {
+ static_assert(bits > 0, "invalid instantiation");
+ unsigned long index;
+ return LIKELY(_BitScanReverse(&index, static_cast<uint32_t>(x)))
+ ? (31 - index - (32 - bits))
+ : bits;
+}
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
+ unsigned>::type
+ CountLeadingZeroBits(T x) {
+ static_assert(bits > 0, "invalid instantiation");
+ unsigned long index;
+// MSVC only supplies _BitScanReverse64 when building for a 64-bit target.
+#if defined(ARCH_CPU_64_BITS)
+ return LIKELY(_BitScanReverse64(&index, static_cast<uint64_t>(x)))
+ ? (63 - index)
+ : 64;
+#else
+ uint32_t left = static_cast<uint32_t>(x >> 32);
+ if (LIKELY(_BitScanReverse(&index, left)))
+ return 31 - index;
+
+ uint32_t right = static_cast<uint32_t>(x);
+ if (LIKELY(_BitScanReverse(&index, right)))
+ return 63 - index;
+
+ return 64;
+#endif
+}
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
+ unsigned>::type
+ CountTrailingZeroBits(T x) {
+ static_assert(bits > 0, "invalid instantiation");
+ unsigned long index;
+ return LIKELY(_BitScanForward(&index, static_cast<uint32_t>(x))) ? index
+ : bits;
+}
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
+ unsigned>::type
+ CountTrailingZeroBits(T x) {
+ static_assert(bits > 0, "invalid instantiation");
+ unsigned long index;
+// MSVC only supplies _BitScanForward64 when building for a 64-bit target.
+#if defined(ARCH_CPU_64_BITS)
+ return LIKELY(_BitScanForward64(&index, static_cast<uint64_t>(x))) ? index
+ : 64;
+#else
+ uint32_t right = static_cast<uint32_t>(x);
+ if (LIKELY(_BitScanForward(&index, right)))
+ return index;
+
+ uint32_t left = static_cast<uint32_t>(x >> 32);
+ if (LIKELY(_BitScanForward(&index, left)))
+ return 32 + index;
+
+ return 64;
+#endif
+}
+
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+ return CountLeadingZeroBits(x);
+}
+
+ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
+ return CountLeadingZeroBits(x);
+}
+
+#elif defined(COMPILER_GCC)
+
+// __builtin_clz has undefined behaviour for an input of 0, even though there's
+// clearly a return value that makes sense, and even though some processor clz
+// instructions have defined behaviour for 0. We could drop to raw __asm__ to
+// do better, but we'll avoid doing that unless we see proof that we need to.
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
+ unsigned>::type
+ CountLeadingZeroBits(T value) {
+ static_assert(bits > 0, "invalid instantiation");
+ return LIKELY(value)
+ ? bits == 64
+ ? __builtin_clzll(static_cast<uint64_t>(value))
+ : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits)
+ : bits;
+}
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
+ unsigned>::type
+ CountTrailingZeroBits(T value) {
+ return LIKELY(value) ? bits == 64
+ ? __builtin_ctzll(static_cast<uint64_t>(value))
+ : __builtin_ctz(static_cast<uint32_t>(value))
+ : bits;
+}
+
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+ return CountLeadingZeroBits(x);
+}
+
+ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
+ return CountLeadingZeroBits(x);
+}
+
+#endif
+
+ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
+ return CountLeadingZeroBits(x);
+}
+
+ALWAYS_INLINE size_t CountTrailingZeroBitsSizeT(size_t x) {
+ return CountTrailingZeroBits(x);
+}
+
+// Returns the integer i such that 2^i <= n < 2^(i+1)
+inline int Log2Floor(uint32_t n) {
+ return 31 - CountLeadingZeroBits(n);
+}
+
+// Returns the integer i such that 2^(i-1) < n <= 2^i
+inline int Log2Ceiling(uint32_t n) {
+ // When n == 0, we want the function to return -1.
+ // When n == 0, (n - 1) will underflow to 0xFFFFFFFF, which is
+ // why the statement below starts with (n ? 32 : -1).
+ return (n ? 32 : -1) - CountLeadingZeroBits(n - 1);
+}
+
+} // namespace bits
+} // namespace base
+
+#endif // BASE_BITS_H_
diff --git a/security/sandbox/chromium/base/callback.h b/security/sandbox/chromium/base/callback.h
new file mode 100644
index 0000000000..1427faaaea
--- /dev/null
+++ b/security/sandbox/chromium/base/callback.h
@@ -0,0 +1,149 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// NOTE: Header files that do not require the full definition of
+// base::{Once,Repeating}Callback or base::{Once,Repeating}Closure should
+// #include "base/callback_forward.h" instead of this file.
+
+#ifndef BASE_CALLBACK_H_
+#define BASE_CALLBACK_H_
+
+#include <stddef.h>
+
+#include "base/callback_forward.h"
+#include "base/callback_internal.h"
+
+// -----------------------------------------------------------------------------
+// Usage documentation
+// -----------------------------------------------------------------------------
+//
+// Overview:
+// A callback is similar in concept to a function pointer: it wraps a runnable
+// object such as a function, method, lambda, or even another callback, allowing
+// the runnable object to be invoked later via the callback object.
+//
+// Unlike function pointers, callbacks are created with base::BindOnce() or
+// base::BindRepeating() and support partial function application.
+//
+// A base::OnceCallback may be Run() at most once; a base::RepeatingCallback may
+// be Run() any number of times. |is_null()| is guaranteed to return true for a
+// moved-from callback.
+//
+// // The lambda takes two arguments, but the first argument |x| is bound at
+// // callback creation.
+// base::OnceCallback<int(int)> cb = base::BindOnce([] (int x, int y) {
+// return x + y;
+// }, 1);
+// // Run() only needs the remaining unbound argument |y|.
+// printf("1 + 2 = %d\n", std::move(cb).Run(2)); // Prints 3
+// printf("cb is null? %s\n",
+// cb.is_null() ? "true" : "false"); // Prints true
+// std::move(cb).Run(2); // Crashes since |cb| has already run.
+//
+// Callbacks also support cancellation. A common use is binding the receiver
+// object as a WeakPtr<T>. If that weak pointer is invalidated, calling Run()
+// will be a no-op. Note that |IsCancelled()| and |is_null()| are distinct:
+// simply cancelling a callback will not also make it null.
+//
+// base::Callback is currently a type alias for base::RepeatingCallback. In the
+// future, we expect to flip this to default to base::OnceCallback.
+//
+// See //docs/callback.md for the full documentation.
+
+namespace base {
+
+template <typename R, typename... Args>
+class OnceCallback<R(Args...)> : public internal::CallbackBase {
+ public:
+ using RunType = R(Args...);
+ using PolymorphicInvoke = R (*)(internal::BindStateBase*,
+ internal::PassingType<Args>...);
+
+ constexpr OnceCallback() = default;
+ OnceCallback(std::nullptr_t) = delete;
+
+ explicit OnceCallback(internal::BindStateBase* bind_state)
+ : internal::CallbackBase(bind_state) {}
+
+ OnceCallback(const OnceCallback&) = delete;
+ OnceCallback& operator=(const OnceCallback&) = delete;
+
+ OnceCallback(OnceCallback&&) noexcept = default;
+ OnceCallback& operator=(OnceCallback&&) noexcept = default;
+
+ OnceCallback(RepeatingCallback<RunType> other)
+ : internal::CallbackBase(std::move(other)) {}
+
+ OnceCallback& operator=(RepeatingCallback<RunType> other) {
+ static_cast<internal::CallbackBase&>(*this) = std::move(other);
+ return *this;
+ }
+
+ R Run(Args... args) const & {
+ static_assert(!sizeof(*this),
+ "OnceCallback::Run() may only be invoked on a non-const "
+ "rvalue, i.e. std::move(callback).Run().");
+ NOTREACHED();
+ }
+
+ R Run(Args... args) && {
+ // Move the callback instance into a local variable before the invocation,
+    // which ensures the internal state is cleared after the invocation.
+ // It's not safe to touch |this| after the invocation, since running the
+ // bound function may destroy |this|.
+ OnceCallback cb = std::move(*this);
+ PolymorphicInvoke f =
+ reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
+ return f(cb.bind_state_.get(), std::forward<Args>(args)...);
+ }
+};
+
+template <typename R, typename... Args>
+class RepeatingCallback<R(Args...)> : public internal::CallbackBaseCopyable {
+ public:
+ using RunType = R(Args...);
+ using PolymorphicInvoke = R (*)(internal::BindStateBase*,
+ internal::PassingType<Args>...);
+
+ constexpr RepeatingCallback() = default;
+ RepeatingCallback(std::nullptr_t) = delete;
+
+ explicit RepeatingCallback(internal::BindStateBase* bind_state)
+ : internal::CallbackBaseCopyable(bind_state) {}
+
+ // Copyable and movable.
+ RepeatingCallback(const RepeatingCallback&) = default;
+ RepeatingCallback& operator=(const RepeatingCallback&) = default;
+ RepeatingCallback(RepeatingCallback&&) noexcept = default;
+ RepeatingCallback& operator=(RepeatingCallback&&) noexcept = default;
+
+ bool operator==(const RepeatingCallback& other) const {
+ return EqualsInternal(other);
+ }
+
+ bool operator!=(const RepeatingCallback& other) const {
+ return !operator==(other);
+ }
+
+ R Run(Args... args) const & {
+ PolymorphicInvoke f =
+ reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke());
+ return f(this->bind_state_.get(), std::forward<Args>(args)...);
+ }
+
+ R Run(Args... args) && {
+ // Move the callback instance into a local variable before the invocation,
+    // which ensures the internal state is cleared after the invocation.
+ // It's not safe to touch |this| after the invocation, since running the
+ // bound function may destroy |this|.
+ RepeatingCallback cb = std::move(*this);
+ PolymorphicInvoke f =
+ reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
+ return f(std::move(cb).bind_state_.get(), std::forward<Args>(args)...);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_CALLBACK_H_
diff --git a/security/sandbox/chromium/base/callback_forward.h b/security/sandbox/chromium/base/callback_forward.h
new file mode 100644
index 0000000000..d0f230cedb
--- /dev/null
+++ b/security/sandbox/chromium/base/callback_forward.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CALLBACK_FORWARD_H_
+#define BASE_CALLBACK_FORWARD_H_
+
+namespace base {
+
+template <typename Signature>
+class OnceCallback;
+
+template <typename Signature>
+class RepeatingCallback;
+
+template <typename Signature>
+using Callback = RepeatingCallback<Signature>;
+
+// Syntactic sugar to make OnceClosure<void()> and RepeatingClosure<void()>
+// easier to declare since they will be used in a lot of APIs with delayed
+// execution.
+using OnceClosure = OnceCallback<void()>;
+using RepeatingClosure = RepeatingCallback<void()>;
+using Closure = Callback<void()>;
+
+} // namespace base
+
+#endif // BASE_CALLBACK_FORWARD_H_
diff --git a/security/sandbox/chromium/base/callback_internal.cc b/security/sandbox/chromium/base/callback_internal.cc
new file mode 100644
index 0000000000..d710682e1f
--- /dev/null
+++ b/security/sandbox/chromium/base/callback_internal.cc
@@ -0,0 +1,101 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback_internal.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+bool QueryCancellationTraitsForNonCancellables(
+ const BindStateBase*,
+ BindStateBase::CancellationQueryMode mode) {
+ switch (mode) {
+ case BindStateBase::IS_CANCELLED:
+ return false;
+ case BindStateBase::MAYBE_VALID:
+ return true;
+ }
+ NOTREACHED();
+ return false;
+}
+
+} // namespace
+
+void BindStateBaseRefCountTraits::Destruct(const BindStateBase* bind_state) {
+ bind_state->destructor_(bind_state);
+}
+
+BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
+ void (*destructor)(const BindStateBase*))
+ : BindStateBase(polymorphic_invoke,
+ destructor,
+ &QueryCancellationTraitsForNonCancellables) {}
+
+BindStateBase::BindStateBase(
+ InvokeFuncStorage polymorphic_invoke,
+ void (*destructor)(const BindStateBase*),
+ bool (*query_cancellation_traits)(const BindStateBase*,
+ CancellationQueryMode))
+ : polymorphic_invoke_(polymorphic_invoke),
+ destructor_(destructor),
+ query_cancellation_traits_(query_cancellation_traits) {}
+
+CallbackBase& CallbackBase::operator=(CallbackBase&& c) noexcept = default;
+CallbackBase::CallbackBase(const CallbackBaseCopyable& c)
+ : bind_state_(c.bind_state_) {}
+
+CallbackBase& CallbackBase::operator=(const CallbackBaseCopyable& c) {
+ bind_state_ = c.bind_state_;
+ return *this;
+}
+
+CallbackBase::CallbackBase(CallbackBaseCopyable&& c) noexcept
+ : bind_state_(std::move(c.bind_state_)) {}
+
+CallbackBase& CallbackBase::operator=(CallbackBaseCopyable&& c) noexcept {
+ bind_state_ = std::move(c.bind_state_);
+ return *this;
+}
+
+void CallbackBase::Reset() {
+ // NULL the bind_state_ last, since it may be holding the last ref to whatever
+ // object owns us, and we may be deleted after that.
+ bind_state_ = nullptr;
+}
+
+bool CallbackBase::IsCancelled() const {
+ DCHECK(bind_state_);
+ return bind_state_->IsCancelled();
+}
+
+bool CallbackBase::MaybeValid() const {
+ DCHECK(bind_state_);
+ return bind_state_->MaybeValid();
+}
+
+bool CallbackBase::EqualsInternal(const CallbackBase& other) const {
+ return bind_state_ == other.bind_state_;
+}
+
+CallbackBase::~CallbackBase() = default;
+
+CallbackBaseCopyable::CallbackBaseCopyable(const CallbackBaseCopyable& c) {
+ bind_state_ = c.bind_state_;
+}
+
+CallbackBaseCopyable& CallbackBaseCopyable::operator=(
+ const CallbackBaseCopyable& c) {
+ bind_state_ = c.bind_state_;
+ return *this;
+}
+
+CallbackBaseCopyable& CallbackBaseCopyable::operator=(
+ CallbackBaseCopyable&& c) noexcept = default;
+
+} // namespace internal
+} // namespace base
diff --git a/security/sandbox/chromium/base/callback_internal.h b/security/sandbox/chromium/base/callback_internal.h
new file mode 100644
index 0000000000..fdfdf7f817
--- /dev/null
+++ b/security/sandbox/chromium/base/callback_internal.h
@@ -0,0 +1,194 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains utility functions and classes that help the
+// implementation, and management of the Callback objects.
+
+#ifndef BASE_CALLBACK_INTERNAL_H_
+#define BASE_CALLBACK_INTERNAL_H_
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+struct FakeBindState;
+
+namespace internal {
+
+class BindStateBase;
+class FinallyExecutorCommon;
+class ThenAndCatchExecutorCommon;
+
+template <typename ReturnType>
+class PostTaskExecutor;
+
+template <typename Functor, typename... BoundArgs>
+struct BindState;
+
+class CallbackBase;
+class CallbackBaseCopyable;
+
+struct BindStateBaseRefCountTraits {
+ static void Destruct(const BindStateBase*);
+};
+
+template <typename T>
+using PassingType = std::conditional_t<std::is_scalar<T>::value, T, T&&>;
+
+// BindStateBase is used to provide an opaque handle that the Callback
+// class can use to represent a function object with bound arguments. It
+// behaves as an existential type that is used by a corresponding
+// DoInvoke function to perform the function execution. This allows
+// us to shield the Callback class from the types of the bound argument via
+// "type erasure."
+// At the base level, the only task is to add reference counting data. Avoid
+// using or inheriting any virtual functions. Creating a vtable for every
+// BindState template instantiation results in a lot of bloat. Its only task is
+// to call the destructor which can be done with a function pointer.
+class BASE_EXPORT BindStateBase
+ : public RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits> {
+ public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
+ enum CancellationQueryMode {
+ IS_CANCELLED,
+ MAYBE_VALID,
+ };
+
+ using InvokeFuncStorage = void(*)();
+
+ private:
+ BindStateBase(InvokeFuncStorage polymorphic_invoke,
+ void (*destructor)(const BindStateBase*));
+ BindStateBase(InvokeFuncStorage polymorphic_invoke,
+ void (*destructor)(const BindStateBase*),
+ bool (*query_cancellation_traits)(const BindStateBase*,
+ CancellationQueryMode mode));
+
+ ~BindStateBase() = default;
+
+ friend struct BindStateBaseRefCountTraits;
+ friend class RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits>;
+
+ friend class CallbackBase;
+ friend class CallbackBaseCopyable;
+
+ // Whitelist subclasses that access the destructor of BindStateBase.
+ template <typename Functor, typename... BoundArgs>
+ friend struct BindState;
+ friend struct ::base::FakeBindState;
+
+ bool IsCancelled() const {
+ return query_cancellation_traits_(this, IS_CANCELLED);
+ }
+
+ bool MaybeValid() const {
+ return query_cancellation_traits_(this, MAYBE_VALID);
+ }
+
+  // In C++, it is safe to cast function pointers to function pointers of
+  // another type. It is not okay to use void*. We create an
+  // InvokeFuncStorage that can store our function pointer, and then cast it
+  // back to the original type on usage.
+ InvokeFuncStorage polymorphic_invoke_;
+
+ // Pointer to a function that will properly destroy |this|.
+ void (*destructor_)(const BindStateBase*);
+ bool (*query_cancellation_traits_)(const BindStateBase*,
+ CancellationQueryMode mode);
+
+ DISALLOW_COPY_AND_ASSIGN(BindStateBase);
+};
+
+// Holds the Callback methods that don't require specialization to reduce
+// template bloat.
+// CallbackBase<MoveOnly> is a direct base class of MoveOnly callbacks, and
+// CallbackBase<Copyable> uses CallbackBase<MoveOnly> for its implementation.
+class BASE_EXPORT CallbackBase {
+ public:
+ inline CallbackBase(CallbackBase&& c) noexcept;
+ CallbackBase& operator=(CallbackBase&& c) noexcept;
+
+ explicit CallbackBase(const CallbackBaseCopyable& c);
+ CallbackBase& operator=(const CallbackBaseCopyable& c);
+
+ explicit CallbackBase(CallbackBaseCopyable&& c) noexcept;
+ CallbackBase& operator=(CallbackBaseCopyable&& c) noexcept;
+
+ // Returns true if Callback is null (doesn't refer to anything).
+ bool is_null() const { return !bind_state_; }
+ explicit operator bool() const { return !is_null(); }
+
+  // Returns true if the callback invocation will be a nop due to cancellation.
+  // It's invalid to call this on an uninitialized callback.
+ //
+ // Must be called on the Callback's destination sequence.
+ bool IsCancelled() const;
+
+ // If this returns false, the callback invocation will be a nop due to a
+ // cancellation. This may(!) still return true, even on a cancelled callback.
+ //
+ // This function is thread-safe.
+ bool MaybeValid() const;
+
+ // Returns the Callback into an uninitialized state.
+ void Reset();
+
+ protected:
+ friend class FinallyExecutorCommon;
+ friend class ThenAndCatchExecutorCommon;
+
+ template <typename ReturnType>
+ friend class PostTaskExecutor;
+
+ using InvokeFuncStorage = BindStateBase::InvokeFuncStorage;
+
+ // Returns true if this callback equals |other|. |other| may be null.
+ bool EqualsInternal(const CallbackBase& other) const;
+
+ constexpr inline CallbackBase();
+
+ // Allow initializing of |bind_state_| via the constructor to avoid default
+ // initialization of the scoped_refptr.
+ explicit inline CallbackBase(BindStateBase* bind_state);
+
+ InvokeFuncStorage polymorphic_invoke() const {
+ return bind_state_->polymorphic_invoke_;
+ }
+
+ // Force the destructor to be instantiated inside this translation unit so
+ // that our subclasses will not get inlined versions. Avoids more template
+ // bloat.
+ ~CallbackBase();
+
+ scoped_refptr<BindStateBase> bind_state_;
+};
+
+constexpr CallbackBase::CallbackBase() = default;
+CallbackBase::CallbackBase(CallbackBase&&) noexcept = default;
+CallbackBase::CallbackBase(BindStateBase* bind_state)
+ : bind_state_(AdoptRef(bind_state)) {}
+
+// CallbackBase<Copyable> is a direct base class of Copyable Callbacks.
+class BASE_EXPORT CallbackBaseCopyable : public CallbackBase {
+ public:
+ CallbackBaseCopyable(const CallbackBaseCopyable& c);
+ CallbackBaseCopyable(CallbackBaseCopyable&& c) noexcept = default;
+ CallbackBaseCopyable& operator=(const CallbackBaseCopyable& c);
+ CallbackBaseCopyable& operator=(CallbackBaseCopyable&& c) noexcept;
+
+ protected:
+ constexpr CallbackBaseCopyable() = default;
+ explicit CallbackBaseCopyable(BindStateBase* bind_state)
+ : CallbackBase(bind_state) {}
+ ~CallbackBaseCopyable() = default;
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_CALLBACK_INTERNAL_H_
diff --git a/security/sandbox/chromium/base/compiler_specific.h b/security/sandbox/chromium/base/compiler_specific.h
new file mode 100644
index 0000000000..5f931c8704
--- /dev/null
+++ b/security/sandbox/chromium/base/compiler_specific.h
@@ -0,0 +1,298 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_COMPILER_SPECIFIC_H_
+#define BASE_COMPILER_SPECIFIC_H_
+
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+
+#if !defined(__clang__)
+#error "Only clang-cl is supported on Windows, see https://crbug.com/988071"
+#endif
+
+// Macros for suppressing and disabling warnings on MSVC.
+//
+// Warning numbers are enumerated at:
+// http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx
+//
+// The warning pragma:
+// http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx
+//
+// Using __pragma instead of #pragma inside macros:
+// http://msdn.microsoft.com/en-us/library/d9x1s805.aspx
+
+// MSVC_PUSH_DISABLE_WARNING pushes |n| onto a stack of warnings to be disabled.
+// The warning remains disabled until popped by MSVC_POP_WARNING.
+#define MSVC_PUSH_DISABLE_WARNING(n) \
+ __pragma(warning(push)) __pragma(warning(disable : n))
+
+// Pop effects of innermost MSVC_PUSH_* macro.
+#define MSVC_POP_WARNING() __pragma(warning(pop))
+
+#else // Not MSVC
+
+#define MSVC_PUSH_DISABLE_WARNING(n)
+#define MSVC_POP_WARNING()
+#define MSVC_DISABLE_OPTIMIZE()
+#define MSVC_ENABLE_OPTIMIZE()
+
+#endif // COMPILER_MSVC
+
+// These macros can be helpful when investigating compiler bugs or when
+// investigating issues in local optimized builds, by temporarily disabling
+// optimizations for a single function or file. These macros should never be
+// used to permanently work around compiler bugs or other mysteries, and should
+// not be used in landed changes.
+#if !defined(OFFICIAL_BUILD)
+#if defined(__clang__)
+#define DISABLE_OPTIMIZE() __pragma(clang optimize off)
+#define ENABLE_OPTIMIZE() __pragma(clang optimize on)
+#elif defined(COMPILER_MSVC)
+#define DISABLE_OPTIMIZE() __pragma(optimize("", off))
+#define ENABLE_OPTIMIZE() __pragma(optimize("", on))
+#else
+// These macros are not currently available for other compiler options.
+#endif
+// These macros are not available in official builds.
+#endif // !defined(OFFICIAL_BUILD)
+
+// Annotate a variable indicating it's ok if the variable is not used.
+// (Typically used to silence a compiler warning when the assignment
+// is important for some other reason.)
+// Use like:
+// int x = ...;
+// ALLOW_UNUSED_LOCAL(x);
+#define ALLOW_UNUSED_LOCAL(x) (void)x
+
+// Annotate a typedef or function indicating it's ok if it's not used.
+// Use like:
+// typedef Foo Bar ALLOW_UNUSED_TYPE;
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define ALLOW_UNUSED_TYPE __attribute__((unused))
+#else
+#define ALLOW_UNUSED_TYPE
+#endif
+
+// Annotate a function indicating it should not be inlined.
+// Use like:
+// NOINLINE void DoStuff() { ... }
+#if defined(COMPILER_GCC)
+#define NOINLINE __attribute__((noinline))
+#elif defined(COMPILER_MSVC)
+#define NOINLINE __declspec(noinline)
+#else
+#define NOINLINE
+#endif
+
+#if defined(COMPILER_GCC) && defined(NDEBUG)
+#define ALWAYS_INLINE inline __attribute__((__always_inline__))
+#elif defined(COMPILER_MSVC) && defined(NDEBUG)
+#define ALWAYS_INLINE __forceinline
+#else
+#define ALWAYS_INLINE inline
+#endif
+
+// Specify memory alignment for structs, classes, etc.
+// Use like:
+// class ALIGNAS(16) MyClass { ... }
+// ALIGNAS(16) int array[4];
+//
+// In most places you can use the C++11 keyword "alignas", which is preferred.
+//
+// But compilers have trouble mixing __attribute__((...)) syntax with
+// alignas(...) syntax.
+//
+// Doesn't work in clang or gcc:
+// struct alignas(16) __attribute__((packed)) S { char c; };
+// Works in clang but not gcc:
+// struct __attribute__((packed)) alignas(16) S2 { char c; };
+// Works in clang and gcc:
+// struct alignas(16) S3 { char c; } __attribute__((packed));
+//
+// There are also some attributes that must be specified *before* a class
+// definition: visibility (used for exporting functions/classes) is one of
+// these attributes. This means that it is not possible to use alignas() with a
+// class that is marked as exported.
+#if defined(COMPILER_MSVC)
+#define ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
+#elif defined(COMPILER_GCC)
+#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
+#endif
+
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+// int foo() WARN_UNUSED_RESULT;
+// To explicitly ignore a result, see |ignore_result()| in base/macros.h.
+#undef WARN_UNUSED_RESULT
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define WARN_UNUSED_RESULT
+#endif
+
+// Tell the compiler a function is using a printf-style format string.
+// |format_param| is the one-based index of the format string parameter;
+// |dots_param| is the one-based index of the "..." parameter.
+// For v*printf functions (which take a va_list), pass 0 for dots_param.
+// (This is undocumented but matches what the system C headers do.)
+// For member functions, the implicit this parameter counts as index 1.
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define PRINTF_FORMAT(format_param, dots_param) \
+ __attribute__((format(printf, format_param, dots_param)))
+#else
+#define PRINTF_FORMAT(format_param, dots_param)
+#endif
+
+// WPRINTF_FORMAT is the same, but for wide format strings.
+// This doesn't appear to yet be implemented in any compiler.
+// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
+#define WPRINTF_FORMAT(format_param, dots_param)
+// If available, it would look like:
+// __attribute__((format(wprintf, format_param, dots_param)))
+
+// Sanitizers annotations.
+#if defined(__has_attribute)
+#if __has_attribute(no_sanitize)
+#define NO_SANITIZE(what) __attribute__((no_sanitize(what)))
+#endif
+#endif
+#if !defined(NO_SANITIZE)
+#define NO_SANITIZE(what)
+#endif
+
+// MemorySanitizer annotations.
+#if defined(MEMORY_SANITIZER) && !defined(OS_NACL)
+#include <sanitizer/msan_interface.h>
+
+// Mark a memory region fully initialized.
+// Use this to annotate code that deliberately reads uninitialized data, for
+// example a GC scavenging root set pointers from the stack.
+#define MSAN_UNPOISON(p, size) __msan_unpoison(p, size)
+
+// Check a memory region for initializedness, as if it was being used here.
+// If any bits are uninitialized, crash with an MSan report.
+// Use this to sanitize data which MSan won't be able to track, e.g. before
+// passing data to another process via shared memory.
+#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size) \
+ __msan_check_mem_is_initialized(p, size)
+#else // MEMORY_SANITIZER
+#define MSAN_UNPOISON(p, size)
+#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size)
+#endif // MEMORY_SANITIZER
+
+// DISABLE_CFI_PERF -- Disable Control Flow Integrity for perf reasons.
+#if !defined(DISABLE_CFI_PERF)
+#if defined(__clang__) && defined(OFFICIAL_BUILD)
+#define DISABLE_CFI_PERF __attribute__((no_sanitize("cfi")))
+#else
+#define DISABLE_CFI_PERF
+#endif
+#endif
+
+// Macro useful for writing cross-platform function pointers.
+#if !defined(CDECL)
+#if defined(OS_WIN)
+#define CDECL __cdecl
+#else // defined(OS_WIN)
+#define CDECL
+#endif // defined(OS_WIN)
+#endif // !defined(CDECL)
+
+// Macro for hinting that an expression is likely to be false.
+#if !defined(UNLIKELY)
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define UNLIKELY(x) (x)
+#endif // defined(COMPILER_GCC)
+#endif // !defined(UNLIKELY)
+
+#if !defined(LIKELY)
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define LIKELY(x) __builtin_expect(!!(x), 1)
+#else
+#define LIKELY(x) (x)
+#endif // defined(COMPILER_GCC)
+#endif // !defined(LIKELY)
+
+// Compiler feature-detection.
+// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
+#if defined(__has_feature)
+#define HAS_FEATURE(FEATURE) __has_feature(FEATURE)
+#else
+#define HAS_FEATURE(FEATURE) 0
+#endif
+
+// Macro for telling -Wimplicit-fallthrough that a fallthrough is intentional.
+#if defined(__clang__)
+#define FALLTHROUGH [[clang::fallthrough]]
+#else
+#define FALLTHROUGH
+#endif
+
+#if defined(COMPILER_GCC)
+#define PRETTY_FUNCTION __PRETTY_FUNCTION__
+#elif defined(COMPILER_MSVC)
+#define PRETTY_FUNCTION __FUNCSIG__
+#else
+// See https://en.cppreference.com/w/c/language/function_definition#func
+#define PRETTY_FUNCTION __func__
+#endif
+
+#if !defined(CPU_ARM_NEON)
+#if defined(__arm__)
+#if !defined(__ARMEB__) && !defined(__ARM_EABI__) && !defined(__EABI__) && \
+ !defined(__VFP_FP__) && !defined(_WIN32_WCE) && !defined(ANDROID)
+#error Chromium does not support middle endian architecture
+#endif
+#if defined(__ARM_NEON__)
+#define CPU_ARM_NEON 1
+#endif
+#endif // defined(__arm__)
+#endif // !defined(CPU_ARM_NEON)
+
+#if !defined(HAVE_MIPS_MSA_INTRINSICS)
+#if defined(__mips_msa) && defined(__mips_isa_rev) && (__mips_isa_rev >= 5)
+#define HAVE_MIPS_MSA_INTRINSICS 1
+#endif
+#endif
+
+#if defined(__clang__) && __has_attribute(uninitialized)
+// Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for
+// the specified variable.
+// Library-wide alternative is
+// 'configs -= [ "//build/config/compiler:default_init_stack_vars" ]' in .gn
+// file.
+//
+// See "init_stack_vars" in build/config/compiler/BUILD.gn and
+// http://crbug.com/977230
+// "init_stack_vars" is enabled for non-official builds and we hope to enable it
+// in official builds in 2020 as well. The flag writes a fixed pattern into
+// uninitialized parts of all local variables. In rare cases such initialization
+// is undesirable and attribute can be used:
+// 1. Degraded performance
+// In most cases the compiler can remove additional stores, e.g. if memory is
+// never accessed or properly initialized later. Preserved stores mostly will
+// not affect program performance. However if compiler failed on some
+// performance critical code we can get a visible regression in a benchmark.
+// 2. memset, memcpy calls
+// The compiler may replace some memory writes with memset or memcpy calls.
+// This is not specific to -ftrivial-auto-var-init, but is more likely with
+// the flag. It can be a problem if code is not linked with the C runtime.
+//
+// Note: The flag is a security risk mitigation feature, so in the future
+// uses of the attribute should be avoided when possible. However, to enable
+// this mitigation on most of the code we need to be less strict now and
+// minimize the number of exceptions later. If in doubt, feel free to use
+// the attribute, but document the problem for whoever cleans it up later.
+// E.g. platform, bot, benchmark or test name in patch description or next to
+// the attribute.
+#define STACK_UNINITIALIZED __attribute__((uninitialized))
+#else
+#define STACK_UNINITIALIZED
+#endif
+
+#endif // BASE_COMPILER_SPECIFIC_H_
diff --git a/security/sandbox/chromium/base/containers/adapters.h b/security/sandbox/chromium/base/containers/adapters.h
new file mode 100644
index 0000000000..ec33481752
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/adapters.h
@@ -0,0 +1,55 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_ADAPTERS_H_
+#define BASE_CONTAINERS_ADAPTERS_H_
+
+#include <stddef.h>
+
+#include <iterator>
+#include <utility>
+
+#include "base/macros.h"
+
+namespace base {
+
+namespace internal {
+
+// Internal adapter class for implementing base::Reversed.
+template <typename T>
+class ReversedAdapter {
+ public:
+ using Iterator = decltype(std::rbegin(std::declval<T&>()));
+
+ explicit ReversedAdapter(T& t) : t_(t) {}
+ ReversedAdapter(const ReversedAdapter& ra) : t_(ra.t_) {}
+
+ Iterator begin() const { return std::rbegin(t_); }
+ Iterator end() const { return std::rend(t_); }
+
+ private:
+ T& t_;
+
+ DISALLOW_ASSIGN(ReversedAdapter);
+};
+
+} // namespace internal
+
+// Reversed returns a container adapter usable in a range-based "for" statement
+// for iterating a reversible container in reverse order.
+//
+// Example:
+//
+// std::vector<int> v = ...;
+// for (int i : base::Reversed(v)) {
+// // iterates through v from back to front
+// }
+template <typename T>
+internal::ReversedAdapter<T> Reversed(T& t) {
+ return internal::ReversedAdapter<T>(t);
+}
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_ADAPTERS_H_
diff --git a/security/sandbox/chromium/base/containers/buffer_iterator.h b/security/sandbox/chromium/base/containers/buffer_iterator.h
new file mode 100644
index 0000000000..a4fd670190
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/buffer_iterator.h
@@ -0,0 +1,145 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_BUFFER_ITERATOR_H_
+#define BASE_CONTAINERS_BUFFER_ITERATOR_H_
+
+#include <type_traits>
+
+#include "base/bit_cast.h"
+#include "base/containers/span.h"
+#include "base/numerics/checked_math.h"
+
+namespace base {
+
+// BufferIterator is a bounds-checked container utility to access variable-
+// length, heterogeneous structures contained within a buffer. If the data are
+// homogeneous, use base::span<> instead.
+//
+// After being created with a weakly-owned buffer, BufferIterator returns
+// pointers to structured data within the buffer. After each method call that
+// returns data in the buffer, the iterator position is advanced by the byte
+// size of the object (or span of objects) returned. If there are not enough
+// bytes remaining in the buffer to return the requested object(s), a nullptr
+// or empty span is returned.
+//
+// This class is similar to base::Pickle, which should be preferred for
+// serializing to disk. Pickle versions its header and does not support writing
+// structures, which are problematic for serialization due to struct padding and
+// version shear concerns.
+//
+// Example usage:
+//
+// std::vector<uint8_t> buffer(4096);
+// if (!ReadSomeData(&buffer, buffer.size())) {
+// LOG(ERROR) << "Failed to read data.";
+// return false;
+// }
+//
+// BufferIterator<uint8_t> iterator(buffer);
+// uint32_t* num_items = iterator.Object<uint32_t>();
+// if (!num_items) {
+//     LOG(ERROR) << "No num_items field.";
+// return false;
+// }
+//
+// base::span<const item_struct> items =
+// iterator.Span<item_struct>(*num_items);
+// if (items.size() != *num_items) {
+// LOG(ERROR) << "Not enough items.";
+// return false;
+// }
+//
+// // ... validate the objects in |items|.
+template <typename B>
+class BufferIterator {
+ public:
+ static_assert(std::is_same<std::remove_const_t<B>, char>::value ||
+ std::is_same<std::remove_const_t<B>, unsigned char>::value,
+ "Underlying buffer type must be char-type.");
+
+ BufferIterator() {}
+ BufferIterator(B* data, size_t size)
+ : BufferIterator(make_span(data, size)) {}
+ explicit BufferIterator(span<B> buffer)
+ : buffer_(buffer), remaining_(buffer) {}
+ ~BufferIterator() {}
+
+ // Returns a pointer to a mutable structure T in the buffer at the current
+ // position. On success, the iterator position is advanced by sizeof(T). If
+ // there are not sizeof(T) bytes remaining in the buffer, returns nullptr.
+ template <typename T,
+ typename =
+ typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
+ T* MutableObject() {
+ size_t size = sizeof(T);
+ size_t next_position;
+ if (!CheckAdd(position(), size).AssignIfValid(&next_position))
+ return nullptr;
+ if (next_position > total_size())
+ return nullptr;
+ T* t = bit_cast<T*>(remaining_.data());
+ remaining_ = remaining_.subspan(size);
+ return t;
+ }
+
+ // Returns a const pointer to an object of type T in the buffer at the current
+ // position.
+ template <typename T,
+ typename =
+ typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
+ const T* Object() {
+ return MutableObject<const T>();
+ }
+
+ // Returns a span of |count| T objects in the buffer at the current position.
+ // On success, the iterator position is advanced by |sizeof(T) * count|. If
+ // there are not enough bytes remaining in the buffer to fulfill the request,
+ // returns an empty span.
+ template <typename T,
+ typename =
+ typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
+ span<T> MutableSpan(size_t count) {
+ size_t size;
+ if (!CheckMul(sizeof(T), count).AssignIfValid(&size))
+ return span<T>();
+ size_t next_position;
+ if (!CheckAdd(position(), size).AssignIfValid(&next_position))
+ return span<T>();
+ if (next_position > total_size())
+ return span<T>();
+ auto result = span<T>(bit_cast<T*>(remaining_.data()), count);
+ remaining_ = remaining_.subspan(size);
+ return result;
+ }
+
+ // Returns a span to |count| const objects of type T in the buffer at the
+ // current position.
+ template <typename T,
+ typename =
+ typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
+ span<const T> Span(size_t count) {
+ return MutableSpan<const T>(count);
+ }
+
+ // Resets the iterator position to the absolute offset |to|.
+ void Seek(size_t to) { remaining_ = buffer_.subspan(to); }
+
+ // Returns the total size of the underlying buffer.
+ size_t total_size() { return buffer_.size(); }
+
+ // Returns the current position in the buffer.
+ size_t position() { return buffer_.size_bytes() - remaining_.size_bytes(); }
+
+ private:
+ // The original buffer that the iterator was constructed with.
+ const span<B> buffer_;
+ // A subspan of |buffer_| containing the remaining bytes to iterate over.
+ span<B> remaining_;
+ // Copy and assign allowed.
+};
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_BUFFER_ITERATOR_H_
diff --git a/security/sandbox/chromium/base/containers/checked_iterators.h b/security/sandbox/chromium/base/containers/checked_iterators.h
new file mode 100644
index 0000000000..cdfd2909d4
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/checked_iterators.h
@@ -0,0 +1,205 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_CHECKED_ITERATORS_H_
+#define BASE_CONTAINERS_CHECKED_ITERATORS_H_
+
+#include <iterator>
+#include <memory>
+#include <type_traits>
+
+#include "base/containers/util.h"
+#include "base/logging.h"
+
+namespace base {
+
+template <typename T>
+class CheckedContiguousIterator {
+ public:
+ using difference_type = std::ptrdiff_t;
+ using value_type = std::remove_cv_t<T>;
+ using pointer = T*;
+ using reference = T&;
+ using iterator_category = std::random_access_iterator_tag;
+
+ // Required for converting constructor below.
+ template <typename U>
+ friend class CheckedContiguousIterator;
+
+ constexpr CheckedContiguousIterator() = default;
+ constexpr CheckedContiguousIterator(T* start, const T* end)
+ : CheckedContiguousIterator(start, start, end) {}
+ constexpr CheckedContiguousIterator(const T* start, T* current, const T* end)
+ : start_(start), current_(current), end_(end) {
+ CHECK_LE(start, current);
+ CHECK_LE(current, end);
+ }
+ constexpr CheckedContiguousIterator(const CheckedContiguousIterator& other) =
+ default;
+
+ // Converting constructor allowing conversions like CCI<T> to CCI<const T>,
+ // but disallowing CCI<const T> to CCI<T> or CCI<Derived> to CCI<Base>, which
+ // are unsafe. Furthermore, this is the same condition as used by the
+ // converting constructors of std::span<T> and std::unique_ptr<T[]>.
+ // See https://wg21.link/n4042 for details.
+ template <
+ typename U,
+ std::enable_if_t<std::is_convertible<U (*)[], T (*)[]>::value>* = nullptr>
+ constexpr CheckedContiguousIterator(const CheckedContiguousIterator<U>& other)
+ : start_(other.start_), current_(other.current_), end_(other.end_) {
+ // We explicitly don't delegate to the 3-argument constructor here. Its
+ // CHECKs would be redundant, since we expect |other| to maintain its own
+ // invariant. However, DCHECKs never hurt anybody. Presumably.
+ DCHECK_LE(other.start_, other.current_);
+ DCHECK_LE(other.current_, other.end_);
+ }
+
+ ~CheckedContiguousIterator() = default;
+
+ constexpr CheckedContiguousIterator& operator=(
+ const CheckedContiguousIterator& other) = default;
+
+ constexpr bool operator==(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ == other.current_;
+ }
+
+ constexpr bool operator!=(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ != other.current_;
+ }
+
+ constexpr bool operator<(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ < other.current_;
+ }
+
+ constexpr bool operator<=(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ <= other.current_;
+ }
+
+ constexpr bool operator>(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ > other.current_;
+ }
+
+ constexpr bool operator>=(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ >= other.current_;
+ }
+
+ constexpr CheckedContiguousIterator& operator++() {
+ CHECK_NE(current_, end_);
+ ++current_;
+ return *this;
+ }
+
+ constexpr CheckedContiguousIterator operator++(int) {
+ CheckedContiguousIterator old = *this;
+ ++*this;
+ return old;
+ }
+
+ constexpr CheckedContiguousIterator& operator--() {
+ CHECK_NE(current_, start_);
+ --current_;
+ return *this;
+ }
+
+ constexpr CheckedContiguousIterator operator--(int) {
+ CheckedContiguousIterator old = *this;
+ --*this;
+ return old;
+ }
+
+ constexpr CheckedContiguousIterator& operator+=(difference_type rhs) {
+ if (rhs > 0) {
+ CHECK_LE(rhs, end_ - current_);
+ } else {
+ CHECK_LE(-rhs, current_ - start_);
+ }
+ current_ += rhs;
+ return *this;
+ }
+
+ constexpr CheckedContiguousIterator operator+(difference_type rhs) const {
+ CheckedContiguousIterator it = *this;
+ it += rhs;
+ return it;
+ }
+
+ constexpr CheckedContiguousIterator& operator-=(difference_type rhs) {
+ if (rhs < 0) {
+ CHECK_LE(-rhs, end_ - current_);
+ } else {
+ CHECK_LE(rhs, current_ - start_);
+ }
+ current_ -= rhs;
+ return *this;
+ }
+
+ constexpr CheckedContiguousIterator operator-(difference_type rhs) const {
+ CheckedContiguousIterator it = *this;
+ it -= rhs;
+ return it;
+ }
+
+ constexpr friend difference_type operator-(
+ const CheckedContiguousIterator& lhs,
+ const CheckedContiguousIterator& rhs) {
+ CHECK_EQ(lhs.start_, rhs.start_);
+ CHECK_EQ(lhs.end_, rhs.end_);
+ return lhs.current_ - rhs.current_;
+ }
+
+ constexpr reference operator*() const {
+ CHECK_NE(current_, end_);
+ return *current_;
+ }
+
+ constexpr pointer operator->() const {
+ CHECK_NE(current_, end_);
+ return current_;
+ }
+
+ constexpr reference operator[](difference_type rhs) const {
+ CHECK_GE(rhs, 0);
+ CHECK_LT(rhs, end_ - current_);
+ return current_[rhs];
+ }
+
+ static bool IsRangeMoveSafe(const CheckedContiguousIterator& from_begin,
+ const CheckedContiguousIterator& from_end,
+ const CheckedContiguousIterator& to)
+ WARN_UNUSED_RESULT {
+ if (from_end < from_begin)
+ return false;
+ const auto from_begin_uintptr = get_uintptr(from_begin.current_);
+ const auto from_end_uintptr = get_uintptr(from_end.current_);
+ const auto to_begin_uintptr = get_uintptr(to.current_);
+ const auto to_end_uintptr =
+ get_uintptr((to + std::distance(from_begin, from_end)).current_);
+
+ return to_begin_uintptr >= from_end_uintptr ||
+ to_end_uintptr <= from_begin_uintptr;
+ }
+
+ private:
+ constexpr void CheckComparable(const CheckedContiguousIterator& other) const {
+ CHECK_EQ(start_, other.start_);
+ CHECK_EQ(end_, other.end_);
+ }
+
+ const T* start_ = nullptr;
+ T* current_ = nullptr;
+ const T* end_ = nullptr;
+};
+
+template <typename T>
+using CheckedContiguousConstIterator = CheckedContiguousIterator<const T>;
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_CHECKED_ITERATORS_H_
diff --git a/security/sandbox/chromium/base/containers/circular_deque.h b/security/sandbox/chromium/base/containers/circular_deque.h
new file mode 100644
index 0000000000..0d452b56be
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/circular_deque.h
@@ -0,0 +1,1112 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_CIRCULAR_DEQUE_H_
+#define BASE_CONTAINERS_CIRCULAR_DEQUE_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+#include "base/containers/vector_buffer.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+#include "base/template_util.h"
+
+// base::circular_deque is similar to std::deque. Unlike std::deque, the
+// storage is provided in a flat circular buffer conceptually similar to a
+// vector. The beginning and end will wrap around as necessary so that
+// pushes and pops will be constant time as long as a capacity expansion is
+// not required.
+//
+// The API should be identical to std::deque with the following differences:
+//
+// - ITERATORS ARE NOT STABLE. Mutating the container will invalidate all
+// iterators.
+//
+// - Insertions may resize the vector and so are not constant time (std::deque
+// guarantees constant time for insertions at the ends).
+//
+// - Container-wide comparisons are not implemented. If you want to compare
+// two containers, use an algorithm so the expensive iteration is explicit.
+//
+// If you want a similar container with only a queue API, use base::queue in
+// base/containers/queue.h.
+//
+// Constructors:
+// circular_deque();
+// circular_deque(size_t count);
+// circular_deque(size_t count, const T& value);
+// circular_deque(InputIterator first, InputIterator last);
+// circular_deque(const circular_deque&);
+// circular_deque(circular_deque&&);
+// circular_deque(std::initializer_list<value_type>);
+//
+// Assignment functions:
+// circular_deque& operator=(const circular_deque&);
+// circular_deque& operator=(circular_deque&&);
+// circular_deque& operator=(std::initializer_list<T>);
+// void assign(size_t count, const T& value);
+// void assign(InputIterator first, InputIterator last);
+// void assign(std::initializer_list<T> value);
+//
+// Random accessors:
+// T& at(size_t);
+// const T& at(size_t) const;
+// T& operator[](size_t);
+// const T& operator[](size_t) const;
+//
+// End accessors:
+// T& front();
+// const T& front() const;
+// T& back();
+// const T& back() const;
+//
+// Iterator functions:
+// iterator begin();
+// const_iterator begin() const;
+// const_iterator cbegin() const;
+// iterator end();
+// const_iterator end() const;
+// const_iterator cend() const;
+// reverse_iterator rbegin();
+// const_reverse_iterator rbegin() const;
+// const_reverse_iterator crbegin() const;
+// reverse_iterator rend();
+// const_reverse_iterator rend() const;
+// const_reverse_iterator crend() const;
+//
+// Memory management:
+// void reserve(size_t); // SEE IMPLEMENTATION FOR SOME GOTCHAS.
+// size_t capacity() const;
+// void shrink_to_fit();
+//
+// Size management:
+// void clear();
+// bool empty() const;
+// size_t size() const;
+// void resize(size_t);
+// void resize(size_t count, const T& value);
+//
+// Positional insert and erase:
+// void insert(const_iterator pos, size_type count, const T& value);
+// void insert(const_iterator pos,
+// InputIterator first, InputIterator last);
+// iterator insert(const_iterator pos, const T& value);
+// iterator insert(const_iterator pos, T&& value);
+// iterator emplace(const_iterator pos, Args&&... args);
+// iterator erase(const_iterator pos);
+// iterator erase(const_iterator first, const_iterator last);
+//
+// End insert and erase:
+// void push_front(const T&);
+// void push_front(T&&);
+// void push_back(const T&);
+// void push_back(T&&);
+// T& emplace_front(Args&&...);
+// T& emplace_back(Args&&...);
+// void pop_front();
+// void pop_back();
+//
+// General:
+// void swap(circular_deque&);
+
+namespace base {
+
+template <class T>
+class circular_deque;
+
+namespace internal {
+
+// Start allocating nonempty buffers with this many entries. This is the
+// external capacity so the internal buffer will be one larger (= 4) which is
+// more even for the allocator. See the descriptions of internal vs. external
+// capacity on the comment above the buffer_ variable below.
+constexpr size_t kCircularBufferInitialCapacity = 3;
+
+template <typename T>
+class circular_deque_const_iterator {
+ public:
+ using difference_type = std::ptrdiff_t;
+ using value_type = T;
+ using pointer = const T*;
+ using reference = const T&;
+ using iterator_category = std::random_access_iterator_tag;
+
+ circular_deque_const_iterator() : parent_deque_(nullptr), index_(0) {
+#if DCHECK_IS_ON()
+ created_generation_ = 0;
+#endif // DCHECK_IS_ON()
+ }
+
+ // Dereferencing.
+ const T& operator*() const {
+ CheckUnstableUsage();
+ parent_deque_->CheckValidIndex(index_);
+ return parent_deque_->buffer_[index_];
+ }
+ const T* operator->() const {
+ CheckUnstableUsage();
+ parent_deque_->CheckValidIndex(index_);
+ return &parent_deque_->buffer_[index_];
+ }
+ const value_type& operator[](difference_type i) const { return *(*this + i); }
+
+ // Increment and decrement.
+ circular_deque_const_iterator& operator++() {
+ Increment();
+ return *this;
+ }
+ circular_deque_const_iterator operator++(int) {
+ circular_deque_const_iterator ret = *this;
+ Increment();
+ return ret;
+ }
+ circular_deque_const_iterator& operator--() {
+ Decrement();
+ return *this;
+ }
+ circular_deque_const_iterator operator--(int) {
+ circular_deque_const_iterator ret = *this;
+ Decrement();
+ return ret;
+ }
+
+ // Random access mutation.
+ friend circular_deque_const_iterator operator+(
+ const circular_deque_const_iterator& iter,
+ difference_type offset) {
+ circular_deque_const_iterator ret = iter;
+ ret.Add(offset);
+ return ret;
+ }
+ circular_deque_const_iterator& operator+=(difference_type offset) {
+ Add(offset);
+ return *this;
+ }
+ friend circular_deque_const_iterator operator-(
+ const circular_deque_const_iterator& iter,
+ difference_type offset) {
+ circular_deque_const_iterator ret = iter;
+ ret.Add(-offset);
+ return ret;
+ }
+ circular_deque_const_iterator& operator-=(difference_type offset) {
+ Add(-offset);
+ return *this;
+ }
+
+ friend std::ptrdiff_t operator-(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ lhs.CheckComparable(rhs);
+ return lhs.OffsetFromBegin() - rhs.OffsetFromBegin();
+ }
+
+ // Comparisons.
+ friend bool operator==(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ lhs.CheckComparable(rhs);
+ return lhs.index_ == rhs.index_;
+ }
+ friend bool operator!=(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ return !(lhs == rhs);
+ }
+ friend bool operator<(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ lhs.CheckComparable(rhs);
+ return lhs.OffsetFromBegin() < rhs.OffsetFromBegin();
+ }
+ friend bool operator<=(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ return !(lhs > rhs);
+ }
+ friend bool operator>(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ lhs.CheckComparable(rhs);
+ return lhs.OffsetFromBegin() > rhs.OffsetFromBegin();
+ }
+ friend bool operator>=(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ return !(lhs < rhs);
+ }
+
+ protected:
+ friend class circular_deque<T>;
+
+ circular_deque_const_iterator(const circular_deque<T>* parent, size_t index)
+ : parent_deque_(parent), index_(index) {
+#if DCHECK_IS_ON()
+ created_generation_ = parent->generation_;
+#endif // DCHECK_IS_ON()
+ }
+
+ // Returns the offset from the beginning index of the buffer to the current
+ // item.
+ size_t OffsetFromBegin() const {
+ if (index_ >= parent_deque_->begin_)
+ return index_ - parent_deque_->begin_; // On the same side as begin.
+ return parent_deque_->buffer_.capacity() - parent_deque_->begin_ + index_;
+ }
+
+ // Most uses will be ++ and -- so use a simplified implementation.
+ void Increment() {
+ CheckUnstableUsage();
+ parent_deque_->CheckValidIndex(index_);
+ index_++;
+ if (index_ == parent_deque_->buffer_.capacity())
+ index_ = 0;
+ }
+ void Decrement() {
+ CheckUnstableUsage();
+ parent_deque_->CheckValidIndexOrEnd(index_);
+ if (index_ == 0)
+ index_ = parent_deque_->buffer_.capacity() - 1;
+ else
+ index_--;
+ }
+ void Add(difference_type delta) {
+ CheckUnstableUsage();
+#if DCHECK_IS_ON()
+ if (delta <= 0)
+ parent_deque_->CheckValidIndexOrEnd(index_);
+ else
+ parent_deque_->CheckValidIndex(index_);
+#endif
+ // It should be valid to add 0 to any iterator, even if the container is
+ // empty and the iterator points to end(). The modulo below will divide
+ // by 0 if the buffer capacity is empty, so it's important to check for
+ // this case explicitly.
+ if (delta == 0)
+ return;
+
+ difference_type new_offset = OffsetFromBegin() + delta;
+ DCHECK(new_offset >= 0 &&
+ new_offset <= static_cast<difference_type>(parent_deque_->size()));
+ index_ = (new_offset + parent_deque_->begin_) %
+ parent_deque_->buffer_.capacity();
+ }
+
+#if DCHECK_IS_ON()
+ void CheckUnstableUsage() const {
+ DCHECK(parent_deque_);
+ // Since circular_deque doesn't guarantee stability, any attempt to
+ // dereference this iterator after a mutation (i.e. the generation doesn't
+ // match the original) in the container is illegal.
+ DCHECK_EQ(created_generation_, parent_deque_->generation_)
+ << "circular_deque iterator dereferenced after mutation.";
+ }
+ void CheckComparable(const circular_deque_const_iterator& other) const {
+ DCHECK_EQ(parent_deque_, other.parent_deque_);
+ // Since circular_deque doesn't guarantee stability, two iterators that
+ // are compared must have been generated without mutating the container.
+ // If this fires, the container was mutated between generating the two
+ // iterators being compared.
+ DCHECK_EQ(created_generation_, other.created_generation_);
+ }
+#else
+ inline void CheckUnstableUsage() const {}
+ inline void CheckComparable(const circular_deque_const_iterator&) const {}
+#endif // DCHECK_IS_ON()
+
+ const circular_deque<T>* parent_deque_;
+ size_t index_;
+
+#if DCHECK_IS_ON()
+ // The generation of the parent deque when this iterator was created. The
+ // container will update the generation for every modification so we can
+ // test if the container was modified by comparing them.
+ uint64_t created_generation_;
+#endif // DCHECK_IS_ON()
+};
+
+template <typename T>
+class circular_deque_iterator : public circular_deque_const_iterator<T> {
+ using base = circular_deque_const_iterator<T>;
+
+ public:
+ friend class circular_deque<T>;
+
+ using difference_type = std::ptrdiff_t;
+ using value_type = T;
+ using pointer = T*;
+ using reference = T&;
+ using iterator_category = std::random_access_iterator_tag;
+
+ // Expose the base class' constructor.
+ circular_deque_iterator() : circular_deque_const_iterator<T>() {}
+
+ // Dereferencing.
+ T& operator*() const { return const_cast<T&>(base::operator*()); }
+ T* operator->() const { return const_cast<T*>(base::operator->()); }
+ T& operator[](difference_type i) {
+ return const_cast<T&>(base::operator[](i));
+ }
+
+ // Random access mutation.
+ friend circular_deque_iterator operator+(const circular_deque_iterator& iter,
+ difference_type offset) {
+ circular_deque_iterator ret = iter;
+ ret.Add(offset);
+ return ret;
+ }
+ circular_deque_iterator& operator+=(difference_type offset) {
+ base::Add(offset);
+ return *this;
+ }
+ friend circular_deque_iterator operator-(const circular_deque_iterator& iter,
+ difference_type offset) {
+ circular_deque_iterator ret = iter;
+ ret.Add(-offset);
+ return ret;
+ }
+ circular_deque_iterator& operator-=(difference_type offset) {
+ base::Add(-offset);
+ return *this;
+ }
+
+ // Increment and decrement.
+ circular_deque_iterator& operator++() {
+ base::Increment();
+ return *this;
+ }
+ circular_deque_iterator operator++(int) {
+ circular_deque_iterator ret = *this;
+ base::Increment();
+ return ret;
+ }
+ circular_deque_iterator& operator--() {
+ base::Decrement();
+ return *this;
+ }
+ circular_deque_iterator operator--(int) {
+ circular_deque_iterator ret = *this;
+ base::Decrement();
+ return ret;
+ }
+
+ private:
+ circular_deque_iterator(const circular_deque<T>* parent, size_t index)
+ : circular_deque_const_iterator<T>(parent, index) {}
+};
+
+} // namespace internal
+
+template <typename T>
+class circular_deque {
+ private:
+ using VectorBuffer = internal::VectorBuffer<T>;
+
+ public:
+ using value_type = T;
+ using size_type = std::size_t;
+ using difference_type = std::ptrdiff_t;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using pointer = value_type*;
+ using const_pointer = const value_type*;
+
+ using iterator = internal::circular_deque_iterator<T>;
+ using const_iterator = internal::circular_deque_const_iterator<T>;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ // ---------------------------------------------------------------------------
+ // Constructor
+
+ constexpr circular_deque() = default;
+
+ // Constructs with |count| copies of |value| or default constructed version.
+ circular_deque(size_type count) { resize(count); }
+ circular_deque(size_type count, const T& value) { resize(count, value); }
+
+ // Range constructor.
+ template <class InputIterator>
+ circular_deque(InputIterator first, InputIterator last) {
+ assign(first, last);
+ }
+
+ // Copy/move.
+ circular_deque(const circular_deque& other) : buffer_(other.size() + 1) {
+ assign(other.begin(), other.end());
+ }
+ circular_deque(circular_deque&& other) noexcept
+ : buffer_(std::move(other.buffer_)),
+ begin_(other.begin_),
+ end_(other.end_) {
+ other.begin_ = 0;
+ other.end_ = 0;
+ }
+
+ circular_deque(std::initializer_list<value_type> init) { assign(init); }
+
+ ~circular_deque() { DestructRange(begin_, end_); }
+
+ // ---------------------------------------------------------------------------
+ // Assignments.
+ //
+ // All of these may invalidate iterators and references.
+
+ circular_deque& operator=(const circular_deque& other) {
+ if (&other == this)
+ return *this;
+
+ reserve(other.size());
+ assign(other.begin(), other.end());
+ return *this;
+ }
+ circular_deque& operator=(circular_deque&& other) noexcept {
+ if (&other == this)
+ return *this;
+
+ // We're about to overwrite the buffer, so don't free it in clear to
+ // avoid doing it twice.
+ ClearRetainCapacity();
+ buffer_ = std::move(other.buffer_);
+ begin_ = other.begin_;
+ end_ = other.end_;
+
+ other.begin_ = 0;
+ other.end_ = 0;
+
+ IncrementGeneration();
+ return *this;
+ }
+ circular_deque& operator=(std::initializer_list<value_type> ilist) {
+ reserve(ilist.size());
+ assign(std::begin(ilist), std::end(ilist));
+ return *this;
+ }
+
+ void assign(size_type count, const value_type& value) {
+ ClearRetainCapacity();
+ reserve(count);
+ for (size_t i = 0; i < count; i++)
+ emplace_back(value);
+ IncrementGeneration();
+ }
+
+ // This variant should be enabled only when InputIterator is an iterator.
+ template <typename InputIterator>
+ typename std::enable_if<::base::internal::is_iterator<InputIterator>::value,
+ void>::type
+ assign(InputIterator first, InputIterator last) {
+ // Possible future enhancement, dispatch on iterator tag type. For forward
+ // iterators we can use std::difference to preallocate the space required
+ // and only do one copy.
+ ClearRetainCapacity();
+ for (; first != last; ++first)
+ emplace_back(*first);
+ IncrementGeneration();
+ }
+
+ void assign(std::initializer_list<value_type> value) {
+ reserve(std::distance(value.begin(), value.end()));
+ assign(value.begin(), value.end());
+ }
+
+ // ---------------------------------------------------------------------------
+ // Accessors.
+ //
+ // Since this class assumes no exceptions, at() and operator[] are equivalent.
+
+ const value_type& at(size_type i) const {
+ DCHECK(i < size());
+ size_t right_size = buffer_.capacity() - begin_;
+ if (begin_ <= end_ || i < right_size)
+ return buffer_[begin_ + i];
+ return buffer_[i - right_size];
+ }
+ value_type& at(size_type i) {
+ return const_cast<value_type&>(as_const(*this).at(i));
+ }
+
+ value_type& operator[](size_type i) {
+ return const_cast<value_type&>(as_const(*this)[i]);
+ }
+
+ const value_type& operator[](size_type i) const { return at(i); }
+
+ value_type& front() {
+ DCHECK(!empty());
+ return buffer_[begin_];
+ }
+ const value_type& front() const {
+ DCHECK(!empty());
+ return buffer_[begin_];
+ }
+
+ value_type& back() {
+ DCHECK(!empty());
+ return *(--end());
+ }
+ const value_type& back() const {
+ DCHECK(!empty());
+ return *(--end());
+ }
+
+ // ---------------------------------------------------------------------------
+ // Iterators.
+
+ iterator begin() { return iterator(this, begin_); }
+ const_iterator begin() const { return const_iterator(this, begin_); }
+ const_iterator cbegin() const { return const_iterator(this, begin_); }
+
+ iterator end() { return iterator(this, end_); }
+ const_iterator end() const { return const_iterator(this, end_); }
+ const_iterator cend() const { return const_iterator(this, end_); }
+
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
+ }
+ const_reverse_iterator crbegin() const { return rbegin(); }
+
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+ const_reverse_iterator crend() const { return rend(); }
+
+ // ---------------------------------------------------------------------------
+ // Memory management.
+
+ // IMPORTANT NOTE ON reserve(...): This class implements auto-shrinking of
+ // the buffer when elements are deleted and there is "too much" wasted space.
+ // So if you call reserve() with a large size in anticipation of pushing many
+ // elements, but pop an element before the queue is full, the capacity you
+ // reserved may be lost.
+ //
+ // As a result, it's only worthwhile to call reserve() when you're adding
+ // many things at once with no intermediate operations.
+
+ // Grows the buffer to hold at least |new_capacity| elements; never shrinks.
+ void reserve(size_type new_capacity) {
+ if (new_capacity > capacity())
+ SetCapacityTo(new_capacity);
+ }
+
+ // Number of elements storable without reallocation (the "external" capacity,
+ // one less than the internal buffer size — see buffer_ below).
+ size_type capacity() const {
+ // One item is wasted to indicate end().
+ return buffer_.capacity() == 0 ? 0 : buffer_.capacity() - 1;
+ }
+
+ void shrink_to_fit() {
+ if (empty()) {
+ // Optimize empty case to really delete everything if there was
+ // something.
+ if (buffer_.capacity())
+ buffer_ = VectorBuffer();
+ } else {
+ SetCapacityTo(size());
+ }
+ }
+
+ // ---------------------------------------------------------------------------
+ // Size management.
+
+ // This will additionally reset the capacity() to 0.
+ void clear() {
+ // This can't resize(0) because that requires a default constructor to
+ // compile, which not all contained classes may implement.
+ ClearRetainCapacity();
+ buffer_ = VectorBuffer();
+ }
+
+ bool empty() const { return begin_ == end_; }
+
+ size_type size() const {
+ if (begin_ <= end_)
+ return end_ - begin_;
+ // Contents wrap around the end of the internal buffer.
+ return buffer_.capacity() - begin_ + end_;
+ }
+
+ // When reducing size, the elements are deleted from the end. When expanding
+ // size, elements are added to the end with |value| or the default
+ // constructed version. Even when using resize(count) to shrink, a default
+ // constructor is required for the code to compile, even though it will not
+ // be called.
+ //
+ // There are two versions rather than using a default value to avoid
+ // creating a temporary when shrinking (when it's not needed). Plus if
+ // the default constructor is desired when expanding usually just calling it
+ // for each element is faster than making a default-constructed temporary and
+ // copying it.
+ void resize(size_type count) {
+ // SEE BELOW VERSION if you change this. The code is mostly the same.
+ if (count > size()) {
+ // This could be slightly more efficient but expanding a queue with
+ // identical elements is unusual and the extra computations of emplacing
+ // one-by-one will typically be small relative to calling the constructor
+ // for every item.
+ ExpandCapacityIfNecessary(count - size());
+ while (size() < count)
+ emplace_back();
+ } else if (count < size()) {
+ // Modulo is safe: count < size() implies size() > 0, so the internal
+ // buffer is non-empty and buffer_.capacity() is nonzero.
+ size_t new_end = (begin_ + count) % buffer_.capacity();
+ DestructRange(new_end, end_);
+ end_ = new_end;
+
+ ShrinkCapacityIfNecessary();
+ }
+ IncrementGeneration();
+ }
+ void resize(size_type count, const value_type& value) {
+ // SEE ABOVE VERSION if you change this. The code is mostly the same.
+ if (count > size()) {
+ ExpandCapacityIfNecessary(count - size());
+ while (size() < count)
+ emplace_back(value);
+ } else if (count < size()) {
+ size_t new_end = (begin_ + count) % buffer_.capacity();
+ DestructRange(new_end, end_);
+ end_ = new_end;
+
+ ShrinkCapacityIfNecessary();
+ }
+ IncrementGeneration();
+ }
+
+ // ---------------------------------------------------------------------------
+ // Insert and erase.
+ //
+ // Insertion and deletion in the middle is O(n) and invalidates all existing
+ // iterators.
+ //
+ // The implementation of insert isn't optimized as much as it could be. If
+ // the insertion requires that the buffer be grown, it will first be grown
+ // and everything moved, and then the items will be inserted, potentially
+ // moving some items twice. This simplifies the implementation substantially
+ // and means less generated templatized code. Since this is an uncommon
+ // operation for deques, and already relatively slow, it doesn't seem worth
+ // the benefit to optimize this.
+
+ // Inserts |count| copies of |value| before |pos|. Invalidates all existing
+ // iterators (see the section comment above).
+ void insert(const_iterator pos, size_type count, const T& value) {
+ ValidateIterator(pos);
+
+ // Optimize insert at the beginning.
+ if (pos == begin()) {
+ ExpandCapacityIfNecessary(count);
+ for (size_t i = 0; i < count; i++)
+ push_front(value);
+ return;
+ }
+
+ // MakeRoomFor() re-anchors |insert_cur| after any reallocation and yields
+ // the [insert_cur, insert_end) hole to construct into.
+ iterator insert_cur(this, pos.index_);
+ iterator insert_end;
+ MakeRoomFor(count, &insert_cur, &insert_end);
+ while (insert_cur < insert_end) {
+ new (&buffer_[insert_cur.index_]) T(value);
+ ++insert_cur;
+ }
+
+ IncrementGeneration();
+ }
+
+ // Inserts the range [first, last) before |pos|.
+ //
+ // This enable_if keeps this call from getting confused with the (pos, count,
+ // value) version when value is an integer.
+ template <class InputIterator>
+ typename std::enable_if<::base::internal::is_iterator<InputIterator>::value,
+ void>::type
+ insert(const_iterator pos, InputIterator first, InputIterator last) {
+ ValidateIterator(pos);
+
+ size_t inserted_items = std::distance(first, last);
+ if (inserted_items == 0)
+ return; // Can divide by 0 when doing modulo below, so return early.
+
+ // Make a hole to copy the items into.
+ iterator insert_cur;
+ iterator insert_end;
+ if (pos == begin()) {
+ // Optimize insert at the beginning, nothing needs to be shifted and the
+ // hole is the |inserted_items| block immediately before |begin_|.
+ ExpandCapacityIfNecessary(inserted_items);
+ insert_end = begin();
+ // Move begin_ backwards (with wraparound) to open the hole.
+ begin_ =
+ (begin_ + buffer_.capacity() - inserted_items) % buffer_.capacity();
+ insert_cur = begin();
+ } else {
+ insert_cur = iterator(this, pos.index_);
+ MakeRoomFor(inserted_items, &insert_cur, &insert_end);
+ }
+
+ // Copy the items.
+ while (insert_cur < insert_end) {
+ new (&buffer_[insert_cur.index_]) T(*first);
+ ++insert_cur;
+ ++first;
+ }
+
+ IncrementGeneration();
+ }
+
+ // These all return an iterator to the inserted item. Existing iterators will
+ // be invalidated.
+ iterator insert(const_iterator pos, const T& value) {
+ return emplace(pos, value);
+ }
+ iterator insert(const_iterator pos, T&& value) {
+ return emplace(pos, std::move(value));
+ }
+ // Constructs one element in place before |pos| from |args|.
+ template <class... Args>
+ iterator emplace(const_iterator pos, Args&&... args) {
+ ValidateIterator(pos);
+
+ // Optimize insert at beginning which doesn't require shifting.
+ if (pos == cbegin()) {
+ emplace_front(std::forward<Args>(args)...);
+ return begin();
+ }
+
+ // Do this before we make the new iterators we return.
+ IncrementGeneration();
+
+ iterator insert_begin(this, pos.index_);
+ iterator insert_end;
+ MakeRoomFor(1, &insert_begin, &insert_end);
+ new (&buffer_[insert_begin.index_]) T(std::forward<Args>(args)...);
+
+ return insert_begin;
+ }
+
+ // Calling erase() won't automatically resize the buffer smaller like resize
+ // or the pop functions. Erase is slow and relatively uncommon, and for
+ // normal deque usage a pop will normally be done on a regular basis that
+ // will prevent excessive buffer usage over long periods of time. It's not
+ // worth having the extra code for every template instantiation of erase()
+ // to resize capacity downward to a new buffer.
+ iterator erase(const_iterator pos) { return erase(pos, pos + 1); }
+ // Erases [first, last); returns an iterator to the element following the
+ // erased range.
+ iterator erase(const_iterator first, const_iterator last) {
+ ValidateIterator(first);
+ ValidateIterator(last);
+
+ IncrementGeneration();
+
+ // First, call the destructor on the deleted items.
+ if (first.index_ == last.index_) {
+ // Nothing deleted. Need to return early to avoid falling through to
+ // moving items on top of themselves.
+ return iterator(this, first.index_);
+ } else if (first.index_ < last.index_) {
+ // Contiguous range.
+ buffer_.DestructRange(&buffer_[first.index_], &buffer_[last.index_]);
+ } else {
+ // Deleted range wraps around.
+ buffer_.DestructRange(&buffer_[first.index_],
+ &buffer_[buffer_.capacity()]);
+ buffer_.DestructRange(&buffer_[0], &buffer_[last.index_]);
+ }
+
+ if (first.index_ == begin_) {
+ // This deletion is from the beginning. Nothing needs to be copied, only
+ // begin_ needs to be updated.
+ begin_ = last.index_;
+ return iterator(this, last.index_);
+ }
+
+ // In an erase operation, the shifted items all move logically to the left,
+ // so move them from left-to-right.
+ iterator move_src(this, last.index_);
+ iterator move_src_end = end();
+ iterator move_dest(this, first.index_);
+ for (; move_src < move_src_end; move_src++, move_dest++) {
+ buffer_.MoveRange(&buffer_[move_src.index_],
+ &buffer_[move_src.index_ + 1],
+ &buffer_[move_dest.index_]);
+ }
+
+ end_ = move_dest.index_;
+
+ // Since we did not reallocate and only changed things after the erase
+ // element(s), the input iterator's index points to the thing following the
+ // deletion.
+ return iterator(this, first.index_);
+ }
+
+ // ---------------------------------------------------------------------------
+ // Begin/end operations.
+
+ // push_front/push_back forward to the emplace variants, which handle
+ // capacity growth and generation bumping.
+ void push_front(const T& value) { emplace_front(value); }
+ void push_front(T&& value) { emplace_front(std::move(value)); }
+
+ void push_back(const T& value) { emplace_back(value); }
+ void push_back(T&& value) { emplace_back(std::move(value)); }
+
+ // Constructs a new first element in place; returns a reference to it.
+ template <class... Args>
+ reference emplace_front(Args&&... args) {
+ ExpandCapacityIfNecessary(1);
+ // Step begin_ backwards with wraparound, then construct into the slot.
+ if (begin_ == 0)
+ begin_ = buffer_.capacity() - 1;
+ else
+ begin_--;
+ IncrementGeneration();
+ new (&buffer_[begin_]) T(std::forward<Args>(args)...);
+ return front();
+ }
+
+ // Constructs a new last element in place; returns a reference to it.
+ template <class... Args>
+ reference emplace_back(Args&&... args) {
+ ExpandCapacityIfNecessary(1);
+ // Construct into the end_ slot, then step end_ forwards with wraparound.
+ new (&buffer_[end_]) T(std::forward<Args>(args)...);
+ if (end_ == buffer_.capacity() - 1)
+ end_ = 0;
+ else
+ end_++;
+ IncrementGeneration();
+ return back();
+ }
+
+ // Removes the first element. Precondition: !empty() (DCHECK-enforced).
+ // May auto-shrink the buffer (see ShrinkCapacityIfNecessary).
+ void pop_front() {
+ DCHECK(size());
+ buffer_.DestructRange(&buffer_[begin_], &buffer_[begin_ + 1]);
+ begin_++;
+ if (begin_ == buffer_.capacity())
+ begin_ = 0;
+
+ ShrinkCapacityIfNecessary();
+
+ // Technically popping will not invalidate any iterators since the
+ // underlying buffer will be stable. But in the future we may want to add a
+ // feature that resizes the buffer smaller if there is too much wasted
+ // space. This ensures we can make such a change safely.
+ IncrementGeneration();
+ }
+ // Removes the last element. Precondition: !empty() (DCHECK-enforced).
+ void pop_back() {
+ DCHECK(size());
+ // Step end_ backwards (with wraparound) first; the destroyed element
+ // lives at the new end_ index.
+ if (end_ == 0)
+ end_ = buffer_.capacity() - 1;
+ else
+ end_--;
+ buffer_.DestructRange(&buffer_[end_], &buffer_[end_ + 1]);
+
+ ShrinkCapacityIfNecessary();
+
+ // See pop_front comment about why this is here.
+ IncrementGeneration();
+ }
+
+ // ---------------------------------------------------------------------------
+ // General operations.
+
+ // Exchanges contents with |other| by swapping the buffer and indices.
+ // NOTE(review): only |this|'s generation counter is incremented here;
+ // |other|'s is not — confirm whether both sides' iterators should be
+ // considered invalidated in DCHECK builds.
+ void swap(circular_deque& other) {
+ std::swap(buffer_, other.buffer_);
+ std::swap(begin_, other.begin_);
+ std::swap(end_, other.end_);
+ IncrementGeneration();
+ }
+
+ friend void swap(circular_deque& lhs, circular_deque& rhs) { lhs.swap(rhs); }
+
+ private:
+ friend internal::circular_deque_iterator<T>;
+ friend internal::circular_deque_const_iterator<T>;
+
+ // Moves the items in the given circular buffer to the current one. The
+ // source is moved from so will become invalid. The destination buffer must
+ // have already been allocated with enough size. On return, *to_begin is 0
+ // and *to_end is the element count, i.e. the destination is un-wrapped.
+ static void MoveBuffer(VectorBuffer& from_buf,
+ size_t from_begin,
+ size_t from_end,
+ VectorBuffer* to_buf,
+ size_t* to_begin,
+ size_t* to_end) {
+ size_t from_capacity = from_buf.capacity();
+
+ *to_begin = 0;
+ if (from_begin < from_end) {
+ // Contiguous.
+ from_buf.MoveRange(&from_buf[from_begin], &from_buf[from_end],
+ to_buf->begin());
+ *to_end = from_end - from_begin;
+ } else if (from_begin > from_end) {
+ // Discontiguous, copy the right side to the beginning of the new buffer.
+ from_buf.MoveRange(&from_buf[from_begin], &from_buf[from_capacity],
+ to_buf->begin());
+ size_t right_size = from_capacity - from_begin;
+ // Append the left side.
+ from_buf.MoveRange(&from_buf[0], &from_buf[from_end],
+ &(*to_buf)[right_size]);
+ *to_end = right_size + from_end;
+ } else {
+ // No items.
+ *to_end = 0;
+ }
+ }
+
+ // Expands the buffer size. This assumes the size is larger than the
+ // number of elements in the vector (it won't call delete on anything).
+ void SetCapacityTo(size_t new_capacity) {
+ // Use the capacity + 1 as the internal buffer size to differentiate
+ // empty and full (see definition of buffer_ below).
+ VectorBuffer new_buffer(new_capacity + 1);
+ // MoveBuffer also rewrites begin_/end_ to index into the new buffer.
+ MoveBuffer(buffer_, begin_, end_, &new_buffer, &begin_, &end_);
+ buffer_ = std::move(new_buffer);
+ }
+ // Ensures room for |additional_elts| more elements beyond the current
+ // size(), growing geometrically when a reallocation is needed.
+ void ExpandCapacityIfNecessary(size_t additional_elts) {
+ size_t min_new_capacity = size() + additional_elts;
+ if (capacity() >= min_new_capacity)
+ return; // Already enough room.
+
+ min_new_capacity =
+ std::max(min_new_capacity, internal::kCircularBufferInitialCapacity);
+
+ // std::vector always grows by at least 50%. WTF::Deque grows by at least
+ // 25%. We expect queue workloads to generally stay at a similar size and
+ // grow less than a vector might, so use 25%.
+ size_t new_capacity =
+ std::max(min_new_capacity, capacity() + capacity() / 4);
+ SetCapacityTo(new_capacity);
+ }
+
+ // Reallocates to a smaller buffer when at least half the capacity is
+ // wasted, keeping ~25% headroom. Called from the pop/resize paths.
+ void ShrinkCapacityIfNecessary() {
+ // Don't auto-shrink below this size.
+ if (capacity() <= internal::kCircularBufferInitialCapacity)
+ return;
+
+ // Shrink when 100% of the size() is wasted.
+ size_t sz = size();
+ size_t empty_spaces = capacity() - sz;
+ if (empty_spaces < sz)
+ return;
+
+ // Leave 1/4 the size as free capacity, not going below the initial
+ // capacity.
+ size_t new_capacity =
+ std::max(internal::kCircularBufferInitialCapacity, sz + sz / 4);
+ if (new_capacity < capacity()) {
+ // Count extra item to convert to internal capacity.
+ SetCapacityTo(new_capacity);
+ }
+ }
+
+ // Backend for clear() but does not resize the internal buffer.
+ void ClearRetainCapacity() {
+ // This can't resize(0) because that requires a default constructor to
+ // compile, which not all contained classes may implement.
+ DestructRange(begin_, end_);
+ begin_ = 0;
+ end_ = 0;
+ IncrementGeneration();
+ }
+
+ // Calls destructors for the given begin->end indices. The indices may wrap
+ // around. The buffer is not resized, and the begin_ and end_ members are
+ // not changed.
+ void DestructRange(size_t begin, size_t end) {
+ if (end == begin) {
+ return;
+ } else if (end > begin) {
+ buffer_.DestructRange(&buffer_[begin], &buffer_[end]);
+ } else {
+ // Wrapped: destroy [begin, capacity) then [0, end).
+ buffer_.DestructRange(&buffer_[begin], &buffer_[buffer_.capacity()]);
+ buffer_.DestructRange(&buffer_[0], &buffer_[end]);
+ }
+ }
+
+ // Makes room for |count| items starting at |*insert_begin|. Since iterators
+ // are not stable across buffer resizes, |*insert_begin| will be updated to
+ // point to the beginning of the newly opened position in the new array (it's
+ // in/out), and the end of the newly opened position (it's out-only).
+ void MakeRoomFor(size_t count, iterator* insert_begin, iterator* insert_end) {
+ if (count == 0) {
+ *insert_end = *insert_begin;
+ return;
+ }
+
+ // The offset from the beginning will be stable across reallocations.
+ size_t begin_offset = insert_begin->OffsetFromBegin();
+ ExpandCapacityIfNecessary(count);
+
+ // Re-anchor the in/out iterator in the (possibly new) buffer and compute
+ // the end of the hole.
+ insert_begin->index_ = (begin_ + begin_offset) % buffer_.capacity();
+ *insert_end =
+ iterator(this, (insert_begin->index_ + count) % buffer_.capacity());
+
+ // Update the new end and prepare the iterators for copying.
+ iterator src = end();
+ end_ = (end_ + count) % buffer_.capacity();
+ iterator dest = end();
+
+ // Move the elements. This will always involve shifting logically to the
+ // right, so move in a right-to-left order.
+ while (true) {
+ if (src == *insert_begin)
+ break;
+ --src;
+ --dest;
+ buffer_.MoveRange(&buffer_[src.index_], &buffer_[src.index_ + 1],
+ &buffer_[dest.index_]);
+ }
+ }
+
+#if DCHECK_IS_ON()
+ // Asserts the given index is dereferencable. The index is an index into the
+ // buffer, not an index used by operator[] or at() which will be offsets from
+ // begin.
+ void CheckValidIndex(size_t i) const {
+ if (begin_ <= end_)
+ DCHECK(i >= begin_ && i < end_);
+ else
+ // Wrapped case: valid indices live in [begin_, capacity) or [0, end_).
+ DCHECK((i >= begin_ && i < buffer_.capacity()) || i < end_);
+ }
+
+ // Asserts the given index is either dereferencable or points to end().
+ void CheckValidIndexOrEnd(size_t i) const {
+ if (i != end_)
+ CheckValidIndex(i);
+ }
+
+ // Asserts |i| belongs to this deque and is not stale (generation check).
+ void ValidateIterator(const const_iterator& i) const {
+ DCHECK(i.parent_deque_ == this);
+ i.CheckUnstableUsage();
+ }
+
+ // See generation_ below.
+ void IncrementGeneration() { generation_++; }
+#else
+ // No-op versions of these functions for release builds.
+ void CheckValidIndex(size_t) const {}
+ void CheckValidIndexOrEnd(size_t) const {}
+ void ValidateIterator(const const_iterator& i) const {}
+ void IncrementGeneration() {}
+#endif
+
+ // Danger, the buffer_.capacity() is the "internal capacity" which is
+ // capacity() + 1 since there is an extra item to indicate the end. Otherwise
+ // being completely empty and completely full are indistinguishable (begin ==
+ // end). We could add a separate flag to avoid it, but that adds significant
+ // extra complexity since every computation will have to check for it. Always
+ // keeping one extra unused element in the buffer makes iterator computations
+ // much simpler.
+ //
+ // Container internal code will want to use buffer_.capacity() for offset
+ // computations rather than capacity().
+ VectorBuffer buffer_;
+ size_type begin_ = 0;
+ size_type end_ = 0;
+
+#if DCHECK_IS_ON()
+ // Incremented every time a modification is made that could affect iterator
+ // invalidations.
+ uint64_t generation_ = 0;
+#endif
+};
+
+// Implementations of base::Erase[If] (see base/stl_util.h).
+// Removes all elements equal to |value| (erase-remove idiom).
+template <class T, class Value>
+void Erase(circular_deque<T>& container, const Value& value) {
+ container.erase(std::remove(container.begin(), container.end(), value),
+ container.end());
+}
+
+// Removes all elements matching |pred| (erase-remove idiom).
+template <class T, class Predicate>
+void EraseIf(circular_deque<T>& container, Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_CIRCULAR_DEQUE_H_
diff --git a/security/sandbox/chromium/base/containers/span.h b/security/sandbox/chromium/base/containers/span.h
new file mode 100644
index 0000000000..ce2e3c47e5
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/span.h
@@ -0,0 +1,530 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_SPAN_H_
+#define BASE_CONTAINERS_SPAN_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <array>
+#include <iterator>
+#include <limits>
+#include <type_traits>
+#include <utility>
+
+#include "base/containers/checked_iterators.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+// [views.constants]
+constexpr size_t dynamic_extent = std::numeric_limits<size_t>::max();
+
+template <typename T, size_t Extent = dynamic_extent>
+class span;
+
+namespace internal {
+
+// ExtentImpl/Extent: compile-time element count of an array-like type;
+// dynamic_extent for anything whose size is not statically known.
+template <typename T>
+struct ExtentImpl : std::integral_constant<size_t, dynamic_extent> {};
+
+template <typename T, size_t N>
+struct ExtentImpl<T[N]> : std::integral_constant<size_t, N> {};
+
+template <typename T, size_t N>
+struct ExtentImpl<std::array<T, N>> : std::integral_constant<size_t, N> {};
+
+template <typename T, size_t N>
+struct ExtentImpl<base::span<T, N>> : std::integral_constant<size_t, N> {};
+
+template <typename T>
+using Extent = ExtentImpl<std::remove_cv_t<std::remove_reference_t<T>>>;
+
+// Detects whether a (decayed) type is itself a base::span.
+template <typename T>
+struct IsSpanImpl : std::false_type {};
+
+template <typename T, size_t Extent>
+struct IsSpanImpl<span<T, Extent>> : std::true_type {};
+
+template <typename T>
+using IsSpan = IsSpanImpl<std::decay_t<T>>;
+
+// Detects whether a (decayed) type is a std::array.
+template <typename T>
+struct IsStdArrayImpl : std::false_type {};
+
+template <typename T, size_t N>
+struct IsStdArrayImpl<std::array<T, N>> : std::true_type {};
+
+template <typename T>
+using IsStdArray = IsStdArrayImpl<std::decay_t<T>>;
+
+template <typename T>
+using IsCArray = std::is_array<std::remove_reference_t<T>>;
+
+// True when From* can safely be treated as To* for whole arrays (qualification
+// conversions only — no derived-to-base slicing).
+template <typename From, typename To>
+using IsLegalDataConversion = std::is_convertible<From (*)[], To (*)[]>;
+
+template <typename Container, typename T>
+using ContainerHasConvertibleData = IsLegalDataConversion<
+ std::remove_pointer_t<decltype(base::data(std::declval<Container>()))>,
+ T>;
+
+template <typename Container>
+using ContainerHasIntegralSize =
+ std::is_integral<decltype(base::size(std::declval<Container>()))>;
+
+template <typename From, size_t FromExtent, typename To, size_t ToExtent>
+using EnableIfLegalSpanConversion =
+ std::enable_if_t<(ToExtent == dynamic_extent || ToExtent == FromExtent) &&
+ IsLegalDataConversion<From, To>::value>;
+
+// SFINAE check if Array can be converted to a span<T>.
+template <typename Array, typename T, size_t Extent>
+using EnableIfSpanCompatibleArray =
+ std::enable_if_t<(Extent == dynamic_extent ||
+ Extent == internal::Extent<Array>::value) &&
+ ContainerHasConvertibleData<Array, T>::value>;
+
+// SFINAE check if Container can be converted to a span<T>.
+template <typename Container, typename T>
+using IsSpanCompatibleContainer =
+ std::conditional_t<!IsSpan<Container>::value &&
+ !IsStdArray<Container>::value &&
+ !IsCArray<Container>::value &&
+ ContainerHasConvertibleData<Container, T>::value &&
+ ContainerHasIntegralSize<Container>::value,
+ std::true_type,
+ std::false_type>;
+
+template <typename Container, typename T>
+using EnableIfSpanCompatibleContainer =
+ std::enable_if_t<IsSpanCompatibleContainer<Container, T>::value>;
+
+template <typename Container, typename T, size_t Extent>
+using EnableIfSpanCompatibleContainerAndSpanIsDynamic =
+ std::enable_if_t<IsSpanCompatibleContainer<Container, T>::value &&
+ Extent == dynamic_extent>;
+
+// A helper template for storing the size of a span. Spans with static extents
+// don't require additional storage, since the extent itself is specified in the
+// template parameter.
+template <size_t Extent>
+class ExtentStorage {
+ public:
+ // |size| is accepted for interface parity with the dynamic case but unused.
+ constexpr explicit ExtentStorage(size_t size) noexcept {}
+ constexpr size_t size() const noexcept { return Extent; }
+};
+
+// Specialization of ExtentStorage for dynamic extents, which do require
+// explicit storage for the size.
+template <>
+struct ExtentStorage<dynamic_extent> {
+ constexpr explicit ExtentStorage(size_t size) noexcept : size_(size) {}
+ constexpr size_t size() const noexcept { return size_; }
+
+ private:
+ size_t size_;
+};
+
+} // namespace internal
+
+// A span is a value type that represents an array of elements of type T. Since
+// it only consists of a pointer to memory with an associated size, it is very
+// light-weight. It is cheap to construct, copy, move and use spans, so that
+// users are encouraged to use it as a pass-by-value parameter. A span does not
+// own the underlying memory, so care must be taken to ensure that a span does
+// not outlive the backing store.
+//
+// span is somewhat analogous to StringPiece, but with arbitrary element types,
+// allowing mutation if T is non-const.
+//
+// span is implicitly convertible from C++ arrays, as well as most [1]
+// container-like types that provide a data() and size() method (such as
+// std::vector<T>). A mutable span<T> can also be implicitly converted to an
+// immutable span<const T>.
+//
+// Consider using a span for functions that take a data pointer and size
+// parameter: it allows the function to still act on an array-like type, while
+// allowing the caller code to be a bit more concise.
+//
+// For read-only data access pass a span<const T>: the caller can supply either
+// a span<const T> or a span<T>, while the callee will have a read-only view.
+// For read-write access a mutable span<T> is required.
+//
+// Without span:
+// Read-Only:
+// // std::string HexEncode(const uint8_t* data, size_t size);
+// std::vector<uint8_t> data_buffer = GenerateData();
+// std::string r = HexEncode(data_buffer.data(), data_buffer.size());
+//
+// Mutable:
+// // ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt, Args...);
+// char str_buffer[100];
+// SafeSNPrintf(str_buffer, sizeof(str_buffer), "Pi ~= %lf", 3.14);
+//
+// With span:
+// Read-Only:
+// // std::string HexEncode(base::span<const uint8_t> data);
+// std::vector<uint8_t> data_buffer = GenerateData();
+// std::string r = HexEncode(data_buffer);
+//
+// Mutable:
+// // ssize_t SafeSNPrintf(base::span<char>, const char* fmt, Args...);
+// char str_buffer[100];
+// SafeSNPrintf(str_buffer, "Pi ~= %lf", 3.14);
+//
+// Spans with "const" and pointers
+// -------------------------------
+//
+// Const and pointers can get confusing. Here are vectors of pointers and their
+// corresponding spans:
+//
+// const std::vector<int*> => base::span<int* const>
+// std::vector<const int*> => base::span<const int*>
+// const std::vector<const int*> => base::span<const int* const>
+//
+// Differences from the C++20 draft
+// --------------------------------
+//
+// http://eel.is/c++draft/views contains the latest C++20 draft of std::span.
+// Chromium tries to follow the draft as closely as possible. Differences between
+// the draft and the implementation are documented in subsections below.
+//
+// Differences from [span.objectrep]:
+// - as_bytes() and as_writable_bytes() return spans of uint8_t instead of
+// std::byte (std::byte is a C++17 feature)
+//
+// Differences from [span.cons]:
+// - Constructing a static span (i.e. Extent != dynamic_extent) from a dynamic
+// sized container (e.g. std::vector) requires an explicit conversion (in the
+// C++20 draft this is simply UB)
+//
+// Differences from [span.obs]:
+// - empty() is marked with WARN_UNUSED_RESULT instead of [[nodiscard]]
+// ([[nodiscard]] is a C++17 feature)
+//
+// Furthermore, all constructors and methods are marked noexcept due to the lack
+// of exceptions in Chromium.
+//
+// Due to the lack of class template argument deduction guides in C++14
+// appropriate make_span() utility functions are provided.
+
+// [span], class template span
+template <typename T, size_t Extent>
+class span : public internal::ExtentStorage<Extent> {
+ private:
+ using ExtentStorage = internal::ExtentStorage<Extent>;
+
+ public:
+ using element_type = T;
+ using value_type = std::remove_cv_t<T>;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ using pointer = T*;
+ using reference = T&;
+ using iterator = CheckedContiguousIterator<T>;
+ using const_iterator = CheckedContiguousConstIterator<T>;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ static constexpr size_t extent = Extent;
+
+ // [span.cons], span constructors, copy, assignment, and destructor
+ // Default construction is only valid for dynamic or zero extents.
+ constexpr span() noexcept : ExtentStorage(0), data_(nullptr) {
+ static_assert(Extent == dynamic_extent || Extent == 0, "Invalid Extent");
+ }
+
+ // (data, size): for static extents, CHECKs that |size| matches Extent.
+ constexpr span(T* data, size_t size) noexcept
+ : ExtentStorage(size), data_(data) {
+ CHECK(Extent == dynamic_extent || Extent == size);
+ }
+
+ // Artificially templatized to break ambiguity for span(ptr, 0).
+ template <typename = void>
+ constexpr span(T* begin, T* end) noexcept : span(begin, end - begin) {
+ // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+ CHECK(begin <= end);
+ }
+
+ // Implicit conversion from a C array of known bound.
+ template <
+ size_t N,
+ typename = internal::EnableIfSpanCompatibleArray<T (&)[N], T, Extent>>
+ constexpr span(T (&array)[N]) noexcept : span(base::data(array), N) {}
+
+ // Implicit conversions from std::array (mutable and const).
+ template <
+ size_t N,
+ typename = internal::
+ EnableIfSpanCompatibleArray<std::array<value_type, N>&, T, Extent>>
+ constexpr span(std::array<value_type, N>& array) noexcept
+ : span(base::data(array), N) {}
+
+ template <size_t N,
+ typename = internal::EnableIfSpanCompatibleArray<
+ const std::array<value_type, N>&,
+ T,
+ Extent>>
+ constexpr span(const std::array<value_type, N>& array) noexcept
+ : span(base::data(array), N) {}
+
+ // Conversion from a container that has compatible base::data() and integral
+ // base::size(). Only participates for dynamic-extent spans.
+ template <
+ typename Container,
+ typename =
+ internal::EnableIfSpanCompatibleContainerAndSpanIsDynamic<Container&,
+ T,
+ Extent>>
+ constexpr span(Container& container) noexcept
+ : span(base::data(container), base::size(container)) {}
+
+ template <
+ typename Container,
+ typename = internal::EnableIfSpanCompatibleContainerAndSpanIsDynamic<
+ const Container&,
+ T,
+ Extent>>
+ constexpr span(const Container& container) noexcept
+ : span(base::data(container), base::size(container)) {}
+
+ constexpr span(const span& other) noexcept = default;
+
+ // Conversions from spans of compatible types and extents: this allows a
+ // span<T> to be seamlessly used as a span<const T>, but not the other way
+ // around. If extent is not dynamic, OtherExtent has to be equal to Extent.
+ template <
+ typename U,
+ size_t OtherExtent,
+ typename =
+ internal::EnableIfLegalSpanConversion<U, OtherExtent, T, Extent>>
+ constexpr span(const span<U, OtherExtent>& other)
+ : span(other.data(), other.size()) {}
+
+ constexpr span& operator=(const span& other) noexcept = default;
+ ~span() noexcept = default;
+
+ // [span.sub], span subviews
+ // First |Count| elements as a fixed-extent span; bounds enforced at
+ // compile time for static extents, by CHECK for dynamic ones.
+ template <size_t Count>
+ constexpr span<T, Count> first() const noexcept {
+ static_assert(Extent == dynamic_extent || Count <= Extent,
+ "Count must not exceed Extent");
+ CHECK(Extent != dynamic_extent || Count <= size());
+ return {data(), Count};
+ }
+
+ template <size_t Count>
+ constexpr span<T, Count> last() const noexcept {
+ static_assert(Extent == dynamic_extent || Count <= Extent,
+ "Count must not exceed Extent");
+ CHECK(Extent != dynamic_extent || Count <= size());
+ return {data() + (size() - Count), Count};
+ }
+
+ // Subview starting at Offset; the resulting extent is Count if given,
+ // otherwise Extent - Offset when Extent is static, else dynamic.
+ template <size_t Offset, size_t Count = dynamic_extent>
+ constexpr span<T,
+ (Count != dynamic_extent
+ ? Count
+ : (Extent != dynamic_extent ? Extent - Offset
+ : dynamic_extent))>
+ subspan() const noexcept {
+ static_assert(Extent == dynamic_extent || Offset <= Extent,
+ "Offset must not exceed Extent");
+ static_assert(Extent == dynamic_extent || Count == dynamic_extent ||
+ Count <= Extent - Offset,
+ "Count must not exceed Extent - Offset");
+ CHECK(Extent != dynamic_extent || Offset <= size());
+ CHECK(Extent != dynamic_extent || Count == dynamic_extent ||
+ Count <= size() - Offset);
+ return {data() + Offset, Count != dynamic_extent ? Count : size() - Offset};
+ }
+
+ // Runtime-sized subviews; always dynamic-extent, bounds CHECKed.
+ constexpr span<T, dynamic_extent> first(size_t count) const noexcept {
+ // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+ CHECK(count <= size());
+ return {data(), count};
+ }
+
+ constexpr span<T, dynamic_extent> last(size_t count) const noexcept {
+ // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+ CHECK(count <= size());
+ return {data() + (size() - count), count};
+ }
+
+ constexpr span<T, dynamic_extent> subspan(size_t offset,
+ size_t count = dynamic_extent) const
+ noexcept {
+ // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+ CHECK(offset <= size());
+ CHECK(count == dynamic_extent || count <= size() - offset);
+ return {data() + offset, count != dynamic_extent ? count : size() - offset};
+ }
+
+ // [span.obs], span observers
+ constexpr size_t size() const noexcept { return ExtentStorage::size(); }
+ constexpr size_t size_bytes() const noexcept { return size() * sizeof(T); }
+ constexpr bool empty() const noexcept WARN_UNUSED_RESULT {
+ return size() == 0;
+ }
+
+ // [span.elem], span element access
+ constexpr T& operator[](size_t idx) const noexcept {
+ // Note: CHECK_LT is not constexpr, hence regular CHECK must be used.
+ CHECK(idx < size());
+ return *(data() + idx);
+ }
+
+ constexpr T& front() const noexcept {
+ static_assert(Extent == dynamic_extent || Extent > 0,
+ "Extent must not be 0");
+ CHECK(Extent != dynamic_extent || !empty());
+ return *data();
+ }
+
+ constexpr T& back() const noexcept {
+ static_assert(Extent == dynamic_extent || Extent > 0,
+ "Extent must not be 0");
+ CHECK(Extent != dynamic_extent || !empty());
+ return *(data() + size() - 1);
+ }
+
+ constexpr T* data() const noexcept { return data_; }
+
+ // [span.iter], span iterator support
+ // Iterators are checked (range-carrying) rather than raw pointers.
+ constexpr iterator begin() const noexcept {
+ return iterator(data_, data_ + size());
+ }
+ constexpr iterator end() const noexcept {
+ return iterator(data_, data_ + size(), data_ + size());
+ }
+
+ constexpr const_iterator cbegin() const noexcept { return begin(); }
+ constexpr const_iterator cend() const noexcept { return end(); }
+
+ constexpr reverse_iterator rbegin() const noexcept {
+ return reverse_iterator(end());
+ }
+ constexpr reverse_iterator rend() const noexcept {
+ return reverse_iterator(begin());
+ }
+
+ constexpr const_reverse_iterator crbegin() const noexcept {
+ return const_reverse_iterator(cend());
+ }
+ constexpr const_reverse_iterator crend() const noexcept {
+ return const_reverse_iterator(cbegin());
+ }
+
+ private:
+ // Non-owning pointer to the first element; size lives in ExtentStorage.
+ T* data_;
+};
+
+// span<T, Extent>::extent can not be declared inline prior to C++17, hence this
+// definition is required (out-of-class definition of the static member).
+template <class T, size_t Extent>
+constexpr size_t span<T, Extent>::extent;
+
// [span.objectrep], views of object representation

// Reinterprets the span's contents as a read-only byte view. The result's
// extent is the total byte count when X is fixed, dynamic_extent otherwise.
template <typename T, size_t X>
span<const uint8_t, (X == dynamic_extent ? dynamic_extent : sizeof(T) * X)>
as_bytes(span<T, X> s) noexcept {
  return {reinterpret_cast<const uint8_t*>(s.data()), s.size_bytes()};
}

// Mutable counterpart of as_bytes(); the enable_if removes this overload for
// spans of const T.
template <typename T,
          size_t X,
          typename = std::enable_if_t<!std::is_const<T>::value>>
span<uint8_t, (X == dynamic_extent ? dynamic_extent : sizeof(T) * X)>
as_writable_bytes(span<T, X> s) noexcept {
  return {reinterpret_cast<uint8_t*>(s.data()), s.size_bytes()};
}
+
// Type-deducing helpers for constructing a span.
//
// The unnamed `int&...` pack cannot be matched by any explicit template
// argument list, which forces T to be deduced from the call arguments.
template <int&... ExplicitArgumentBarrier, typename T>
constexpr span<T> make_span(T* data, size_t size) noexcept {
  return {data, size};
}

// Overload taking a [begin, end) pointer pair.
template <int&... ExplicitArgumentBarrier, typename T>
constexpr span<T> make_span(T* begin, T* end) noexcept {
  return {begin, end};
}

// make_span utility function that deduces both the span's value_type and extent
// from the passed in argument.
//
// Usage: auto span = base::make_span(...);
template <int&... ExplicitArgumentBarrier, typename Container>
constexpr auto make_span(Container&& container) noexcept {
  // T is whatever base::data() yields for the container, so const containers
  // produce spans of const elements.
  using T =
      std::remove_pointer_t<decltype(base::data(std::declval<Container>()))>;
  using Extent = internal::Extent<Container>;
  return span<T, Extent::value>(std::forward<Container>(container));
}

// make_span utility function that allows callers to explicit specify the span's
// extent, the value_type is deduced automatically. This is useful when passing
// a dynamically sized container to a method expecting static spans, when the
// container is known to have the correct size.
//
// Note: This will CHECK that N indeed matches size(container).
//
// Usage: auto static_span = base::make_span<N>(...);
template <size_t N, int&... ExplicitArgumentBarrier, typename Container>
constexpr auto make_span(Container&& container) noexcept {
  using T =
      std::remove_pointer_t<decltype(base::data(std::declval<Container>()))>;
  // The N-vs-size(container) CHECK noted above happens inside the fixed-
  // extent span<T, N> (pointer, count) constructor.
  return span<T, N>(base::data(container), base::size(container));
}
+
+} // namespace base
+
// Note: std::tuple_size, std::tuple_element and std::get are specialized for
// static spans, so that they can be used in C++17's structured bindings. While
// we don't support C++17 yet, there is no harm in providing these
// specializations already.
namespace std {

// [span.tuple], tuple interface
#if defined(__clang__)
// Due to https://llvm.org/PR39871 and https://llvm.org/PR41331 and their
// respective fixes different versions of libc++ declare std::tuple_size and
// std::tuple_element either as classes or structs. In order to be able to
// specialize std::tuple_size and std::tuple_element for custom base types we
// thus need to disable -Wmismatched-tags in order to support all build
// configurations. Note that this is blessed by the standard in
// https://timsong-cpp.github.io/cppwp/n4140/dcl.type.elab#3.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmismatched-tags"
#endif
// A fixed-extent span behaves like a tuple of X elements of type T.
template <typename T, size_t X>
struct tuple_size<base::span<T, X>> : public integral_constant<size_t, X> {};

// Deliberately declared but never defined: a dynamic-extent span has no
// compile-time size, so structured bindings must not compile for it.
template <typename T>
struct tuple_size<base::span<T, base::dynamic_extent>>;  // not defined

template <size_t I, typename T, size_t X>
struct tuple_element<I, base::span<T, X>> {
  static_assert(
      base::dynamic_extent != X,
      "std::tuple_element<> not supported for base::span<T, dynamic_extent>");
  static_assert(I < X,
                "Index out of bounds in std::tuple_element<> (base::span)");
  using type = T;
};
#if defined(__clang__)
#pragma clang diagnostic pop  // -Wmismatched-tags
#endif

// Compile-time-checked element access for fixed-extent spans.
template <size_t I, typename T, size_t X>
constexpr T& get(base::span<T, X> s) noexcept {
  static_assert(base::dynamic_extent != X,
                "std::get<> not supported for base::span<T, dynamic_extent>");
  static_assert(I < X, "Index out of bounds in std::get<> (base::span)");
  return s[I];
}

}  // namespace std
+
+#endif // BASE_CONTAINERS_SPAN_H_
diff --git a/security/sandbox/chromium/base/containers/stack.h b/security/sandbox/chromium/base/containers/stack.h
new file mode 100644
index 0000000000..5cf06f8251
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/stack.h
@@ -0,0 +1,23 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_STACK_H_
+#define BASE_CONTAINERS_STACK_H_
+
+#include <stack>
+
+#include "base/containers/circular_deque.h"
+
namespace base {

// Provides a definition of base::stack that's like std::stack but uses a
// base::circular_deque instead of std::deque. Since std::stack is just a
// wrapper for an underlying type, we can just provide a typedef for it that
// defaults to the base circular_deque.
//
// All std::stack operations (push/pop/top/emplace/...) are available
// unchanged; only the default backing container differs. Callers may still
// supply any other sequence container as Container.
template <class T, class Container = circular_deque<T>>
using stack = std::stack<T, Container>;

}  // namespace base
+
+#endif // BASE_CONTAINERS_STACK_H_
diff --git a/security/sandbox/chromium/base/containers/util.h b/security/sandbox/chromium/base/containers/util.h
new file mode 100644
index 0000000000..435db0d1f8
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/util.h
@@ -0,0 +1,21 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_UTIL_H_
+#define BASE_CONTAINERS_UTIL_H_
+
+#include <stdint.h>
+
+namespace base {
+
// TODO(crbug.com/817982): What we really need is for checked_math.h to be
// able to do checked arithmetic on pointers.
//
// Converts an object pointer to its integral address so that the checked
// math helpers can reason about it.
template <typename T>
static inline uintptr_t get_uintptr(const T* ptr) {
  return reinterpret_cast<uintptr_t>(ptr);
}
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_UTIL_H_
diff --git a/security/sandbox/chromium/base/containers/vector_buffer.h b/security/sandbox/chromium/base/containers/vector_buffer.h
new file mode 100644
index 0000000000..83cd2ac139
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/vector_buffer.h
@@ -0,0 +1,188 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_VECTOR_BUFFERS_H_
+#define BASE_CONTAINERS_VECTOR_BUFFERS_H_
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "base/containers/util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/numerics/checked_math.h"
+
+namespace base {
+namespace internal {
+
+// Internal implementation detail of base/containers.
+//
+// Implements a vector-like buffer that holds a certain capacity of T. Unlike
+// std::vector, VectorBuffer never constructs or destructs its arguments, and
+// can't change sizes. But it does implement templates to assist in efficient
+// moving and destruction of those items manually.
+//
+// In particular, the destructor function does not iterate over the items if
+// there is no destructor. Moves should be implemented as a memcpy/memmove for
+// trivially copyable objects (POD) otherwise, it should be a std::move if
+// possible, and as a last resort it falls back to a copy. This behavior is
+// similar to std::vector.
+//
+// No special consideration is done for noexcept move constructors since
+// we compile without exceptions.
+//
+// The current API does not support moving overlapping ranges.
+template <typename T>
+class VectorBuffer {
+ public:
+ constexpr VectorBuffer() = default;
+
+#if defined(__clang__) && !defined(__native_client__)
+ // This constructor converts an uninitialized void* to a T* which triggers
+ // clang Control Flow Integrity. Since this is as-designed, disable.
+ __attribute__((no_sanitize("cfi-unrelated-cast", "vptr")))
+#endif
+ VectorBuffer(size_t count)
+ : buffer_(reinterpret_cast<T*>(
+ malloc(CheckMul(sizeof(T), count).ValueOrDie()))),
+ capacity_(count) {
+ }
+ VectorBuffer(VectorBuffer&& other) noexcept
+ : buffer_(other.buffer_), capacity_(other.capacity_) {
+ other.buffer_ = nullptr;
+ other.capacity_ = 0;
+ }
+
+ ~VectorBuffer() { free(buffer_); }
+
+ VectorBuffer& operator=(VectorBuffer&& other) {
+ free(buffer_);
+ buffer_ = other.buffer_;
+ capacity_ = other.capacity_;
+
+ other.buffer_ = nullptr;
+ other.capacity_ = 0;
+ return *this;
+ }
+
+ size_t capacity() const { return capacity_; }
+
+ T& operator[](size_t i) {
+ // TODO(crbug.com/817982): Some call sites (at least circular_deque.h) are
+ // calling this with `i == capacity_` as a way of getting `end()`. Therefore
+ // we have to allow this for now (`i <= capacity_`), until we fix those call
+ // sites to use real iterators. This comment applies here and to `const T&
+ // operator[]`, below.
+ CHECK_LE(i, capacity_);
+ return buffer_[i];
+ }
+
+ const T& operator[](size_t i) const {
+ CHECK_LE(i, capacity_);
+ return buffer_[i];
+ }
+
+ T* begin() { return buffer_; }
+ T* end() { return &buffer_[capacity_]; }
+
+ // DestructRange ------------------------------------------------------------
+
+ // Trivially destructible objects need not have their destructors called.
+ template <typename T2 = T,
+ typename std::enable_if<std::is_trivially_destructible<T2>::value,
+ int>::type = 0>
+ void DestructRange(T* begin, T* end) {}
+
+ // Non-trivially destructible objects must have their destructors called
+ // individually.
+ template <typename T2 = T,
+ typename std::enable_if<!std::is_trivially_destructible<T2>::value,
+ int>::type = 0>
+ void DestructRange(T* begin, T* end) {
+ CHECK_LE(begin, end);
+ while (begin != end) {
+ begin->~T();
+ begin++;
+ }
+ }
+
+ // MoveRange ----------------------------------------------------------------
+ //
+ // The destructor will be called (as necessary) for all moved types. The
+ // ranges must not overlap.
+ //
+ // The parameters and begin and end (one past the last) of the input buffer,
+ // and the address of the first element to copy to. There must be sufficient
+ // room in the destination for all items in the range [begin, end).
+
+ // Trivially copyable types can use memcpy. trivially copyable implies
+ // that there is a trivial destructor as we don't have to call it.
+ template <typename T2 = T,
+ typename std::enable_if<base::is_trivially_copyable<T2>::value,
+ int>::type = 0>
+ static void MoveRange(T* from_begin, T* from_end, T* to) {
+ CHECK(!RangesOverlap(from_begin, from_end, to));
+ memcpy(
+ to, from_begin,
+ CheckSub(get_uintptr(from_end), get_uintptr(from_begin)).ValueOrDie());
+ }
+
+ // Not trivially copyable, but movable: call the move constructor and
+ // destruct the original.
+ template <typename T2 = T,
+ typename std::enable_if<std::is_move_constructible<T2>::value &&
+ !base::is_trivially_copyable<T2>::value,
+ int>::type = 0>
+ static void MoveRange(T* from_begin, T* from_end, T* to) {
+ CHECK(!RangesOverlap(from_begin, from_end, to));
+ while (from_begin != from_end) {
+ new (to) T(std::move(*from_begin));
+ from_begin->~T();
+ from_begin++;
+ to++;
+ }
+ }
+
+ // Not movable, not trivially copyable: call the copy constructor and
+ // destruct the original.
+ template <typename T2 = T,
+ typename std::enable_if<!std::is_move_constructible<T2>::value &&
+ !base::is_trivially_copyable<T2>::value,
+ int>::type = 0>
+ static void MoveRange(T* from_begin, T* from_end, T* to) {
+ CHECK(!RangesOverlap(from_begin, from_end, to));
+ while (from_begin != from_end) {
+ new (to) T(*from_begin);
+ from_begin->~T();
+ from_begin++;
+ to++;
+ }
+ }
+
+ private:
+ static bool RangesOverlap(const T* from_begin,
+ const T* from_end,
+ const T* to) {
+ const auto from_begin_uintptr = get_uintptr(from_begin);
+ const auto from_end_uintptr = get_uintptr(from_end);
+ const auto to_uintptr = get_uintptr(to);
+ return !(
+ to >= from_end ||
+ CheckAdd(to_uintptr, CheckSub(from_end_uintptr, from_begin_uintptr))
+ .ValueOrDie() <= from_begin_uintptr);
+ }
+
+ T* buffer_ = nullptr;
+ size_t capacity_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(VectorBuffer);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_CONTAINERS_VECTOR_BUFFERS_H_
diff --git a/security/sandbox/chromium/base/cpu.cc b/security/sandbox/chromium/base/cpu.cc
new file mode 100644
index 0000000000..8cc2028a5f
--- /dev/null
+++ b/security/sandbox/chromium/base/cpu.cc
@@ -0,0 +1,312 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cpu.h"
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "base/stl_util.h"
+
+#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
+#include "base/files/file_util.h"
+#endif
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#include <immintrin.h> // For _xgetbv()
+#endif
+#endif
+
+namespace base {
+
+#if defined(ARCH_CPU_X86_FAMILY)
+namespace internal {
+
// Decodes the display family/model from the raw CPUID leaf-1 signature.
// Returns {family, model, extended family, extended model}.
std::tuple<int, int, int, int> ComputeX86FamilyAndModel(
    const std::string& vendor,
    int signature) {
  // Raw 4-bit base fields of the signature (leaf 1, EAX).
  const int base_family = (signature >> 8) & 0xf;
  const int base_model = (signature >> 4) & 0xf;

  int family = base_family;
  int model = base_model;
  int ext_family = 0;
  int ext_model = 0;

  // Per the "Intel 64 and IA-32 Architectures Developer's Manual: Vol. 2A",
  // the Extended Model field participates when the base family is 06h or 0Fh;
  // the "AMD CPUID Specification" uses it only for base family 0Fh. In both
  // cases the display model is {ExtendedModel[3:0], BaseModel[3:0]}.
  const bool use_extended_model =
      base_family == 0xf || (base_family == 0x6 && vendor == "GenuineIntel");
  if (use_extended_model) {
    ext_model = (signature >> 16) & 0xf;
    model += ext_model << 4;
  }

  // Both manuals define the Extended Family field only for base family 0Fh,
  // with the display family being the sum of the two fields.
  if (base_family == 0xf) {
    ext_family = (signature >> 20) & 0xff;
    family += ext_family;
  }

  return std::make_tuple(family, model, ext_family, ext_model);
}
+
+} // namespace internal
+#endif // defined(ARCH_CPU_X86_FAMILY)
+
// Every feature flag starts false and the vendor string starts "unknown";
// Initialize() then fills in the real values (via CPUID on x86).
CPU::CPU()
    : signature_(0),
      type_(0),
      family_(0),
      model_(0),
      stepping_(0),
      ext_model_(0),
      ext_family_(0),
      has_mmx_(false),
      has_sse_(false),
      has_sse2_(false),
      has_sse3_(false),
      has_ssse3_(false),
      has_sse41_(false),
      has_sse42_(false),
      has_popcnt_(false),
      has_avx_(false),
      has_avx2_(false),
      has_aesni_(false),
      has_non_stop_time_stamp_counter_(false),
      is_running_in_vm_(false),
      cpu_vendor_("unknown") {
  Initialize();
}
+
+namespace {
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#if !defined(COMPILER_MSVC)
+
#if defined(__pic__) && defined(__i386__)

// 32-bit PIC builds reserve %ebx (the GOT pointer), but the cpuid
// instruction clobbers %ebx. Save it in %edi around the instruction and
// return the EBX output through %edi ("=D") instead.
void __cpuid(int cpu_info[4], int info_type) {
  __asm__ volatile(
      "mov %%ebx, %%edi\n"
      "cpuid\n"
      "xchg %%edi, %%ebx\n"
      : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]),
        "=d"(cpu_info[3])
      : "a"(info_type), "c"(0));
}

#else

// Non-PIC / 64-bit version: outputs map directly to EAX/EBX/ECX/EDX.
// ECX is zeroed on input to select sub-leaf 0 for leaves that use it.
void __cpuid(int cpu_info[4], int info_type) {
  __asm__ volatile("cpuid\n"
                   : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
                     "=d"(cpu_info[3])
                   : "a"(info_type), "c"(0));
}

#endif
+#endif // !defined(COMPILER_MSVC)
+
// xgetbv returns the value of an Intel Extended Control Register (XCR).
// Currently only XCR0 is defined by Intel so |xcr| should always be zero.
uint64_t xgetbv(uint32_t xcr) {
#if defined(COMPILER_MSVC)
  return _xgetbv(xcr);
#else
  uint32_t eax, edx;

  // ECX selects the XCR; the 64-bit result comes back split across EDX:EAX.
  __asm__ volatile (
    "xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr));
  return (static_cast<uint64_t>(edx) << 32) | eax;
#endif  // defined(COMPILER_MSVC)
}
+
+#endif // ARCH_CPU_X86_FAMILY
+
#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
// Returns the CPU brand string parsed from /proc/cpuinfo, memoized in a
// leaked function-local static. The returned pointer is never null; it
// points to an empty string if neither key is found (or the read fails).
// NOTE(review): std::istringstream needs <sstream>, which this file does not
// include directly — it is picked up transitively; confirm.
std::string* CpuInfoBrand() {
  static std::string* brand = []() {
    // This function finds the value from /proc/cpuinfo under the key "model
    // name" or "Processor". "model name" is used in Linux 3.8 and later (3.7
    // and later for arm64) and is shown once per CPU. "Processor" is used in
    // earlier versions and is shown only once at the top of /proc/cpuinfo
    // regardless of the number of CPUs.
    const char kModelNamePrefix[] = "model name\t: ";
    const char kProcessorPrefix[] = "Processor\t: ";

    std::string contents;
    ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
    DCHECK(!contents.empty());

    std::istringstream iss(contents);
    std::string line;
    while (std::getline(iss, line)) {
      if (line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0)
        return new std::string(line.substr(strlen(kModelNamePrefix)));
      if (line.compare(0, strlen(kProcessorPrefix), kProcessorPrefix) == 0)
        return new std::string(line.substr(strlen(kProcessorPrefix)));
    }

    return new std::string();
  }();

  return brand;
}
#endif  // defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) ||
        // defined(OS_LINUX))
+
+} // namespace
+
// Populates all members by interrogating the CPU. On x86 this issues a
// sequence of CPUID leaves (0, 1, 7, 0x80000000..0x80000007, and the
// hypervisor leaf 0x40000000); on ARM Linux/Android it reads /proc/cpuinfo.
void CPU::Initialize() {
#if defined(ARCH_CPU_X86_FAMILY)
  int cpu_info[4] = {-1};
  // This array is used to temporarily hold the vendor name and then the brand
  // name. Thus it has to be big enough for both use cases. There are
  // static_asserts below for each of the use cases to make sure this array is
  // big enough.
  char cpu_string[sizeof(cpu_info) * 3 + 1];

  // __cpuid with an InfoType argument of 0 returns the number of
  // valid Ids in CPUInfo[0] and the CPU identification string in
  // the other three array elements. The CPU identification string is
  // not in linear order. The code below arranges the information
  // in a human readable form. The human readable order is CPUInfo[1] |
  // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
  // before using memcpy() to copy these three array elements to |cpu_string|.
  __cpuid(cpu_info, 0);
  int num_ids = cpu_info[0];
  std::swap(cpu_info[2], cpu_info[3]);
  static constexpr size_t kVendorNameSize = 3 * sizeof(cpu_info[1]);
  static_assert(kVendorNameSize < base::size(cpu_string),
                "cpu_string too small");
  memcpy(cpu_string, &cpu_info[1], kVendorNameSize);
  cpu_string[kVendorNameSize] = '\0';
  cpu_vendor_ = cpu_string;

  // Interpret CPU feature information.
  if (num_ids > 0) {
    int cpu_info7[4] = {0};
    __cpuid(cpu_info, 1);
    if (num_ids >= 7) {
      // Leaf 7 carries the structured extended feature flags (AVX2, below).
      __cpuid(cpu_info7, 7);
    }
    signature_ = cpu_info[0];
    stepping_ = cpu_info[0] & 0xf;       // Leaf-1 EAX bits 3:0.
    type_ = (cpu_info[0] >> 12) & 0x3;   // Leaf-1 EAX bits 13:12.
    std::tie(family_, model_, ext_family_, ext_model_) =
        internal::ComputeX86FamilyAndModel(cpu_vendor_, signature_);
    // Feature bits: cpu_info[3] is leaf-1 EDX, cpu_info[2] is leaf-1 ECX
    // (register order per the __cpuid() wrappers above).
    has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
    has_sse_ = (cpu_info[3] & 0x02000000) != 0;
    has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
    has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
    has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
    has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
    has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
    has_popcnt_ = (cpu_info[2] & 0x00800000) != 0;

    // "Hypervisor Present Bit: Bit 31 of ECX of CPUID leaf 0x1."
    // See https://lwn.net/Articles/301888/
    // This is checking for any hypervisor. Hypervisors may choose not to
    // announce themselves. Hypervisors trap CPUID and sometimes return
    // different results to underlying hardware.
    is_running_in_vm_ = (cpu_info[2] & 0x80000000) != 0;

    // AVX instructions will generate an illegal instruction exception unless
    //   a) they are supported by the CPU,
    //   b) XSAVE is supported by the CPU and
    //   c) XSAVE is enabled by the kernel.
    // See http://software.intel.com/en-us/blogs/2011/04/14/is-avx-enabled
    //
    // In addition, we have observed some crashes with the xgetbv instruction
    // even after following Intel's example code. (See crbug.com/375968.)
    // Because of that, we also test the XSAVE bit because its description in
    // the CPUID documentation suggests that it signals xgetbv support.
    has_avx_ =
        (cpu_info[2] & 0x10000000) != 0 &&
        (cpu_info[2] & 0x04000000) != 0 /* XSAVE */ &&
        (cpu_info[2] & 0x08000000) != 0 /* OSXSAVE */ &&
        (xgetbv(0) & 6) == 6 /* XSAVE enabled by kernel */;
    has_aesni_ = (cpu_info[2] & 0x02000000) != 0;
    has_avx2_ = has_avx_ && (cpu_info7[1] & 0x00000020) != 0;
  }

  // Get the brand string of the cpu.
  __cpuid(cpu_info, 0x80000000);
  const int max_parameter = cpu_info[0];

  // Each of leaves 0x80000002..0x80000004 yields 16 bytes of the brand
  // string in EAX..EDX, already in display order.
  static constexpr int kParameterStart = 0x80000002;
  static constexpr int kParameterEnd = 0x80000004;
  static constexpr int kParameterSize = kParameterEnd - kParameterStart + 1;
  static_assert(kParameterSize * sizeof(cpu_info) + 1 == base::size(cpu_string),
                "cpu_string has wrong size");

  if (max_parameter >= kParameterEnd) {
    size_t i = 0;
    for (int parameter = kParameterStart; parameter <= kParameterEnd;
         ++parameter) {
      __cpuid(cpu_info, parameter);
      memcpy(&cpu_string[i], cpu_info, sizeof(cpu_info));
      i += sizeof(cpu_info);
    }
    cpu_string[i] = '\0';
    cpu_brand_ = cpu_string;
  }

  // Leaf 0x80000007 EDX bit 8 advertises an invariant (non-stop) TSC.
  static constexpr int kParameterContainingNonStopTimeStampCounter = 0x80000007;
  if (max_parameter >= kParameterContainingNonStopTimeStampCounter) {
    __cpuid(cpu_info, kParameterContainingNonStopTimeStampCounter);
    has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
  }

  if (!has_non_stop_time_stamp_counter_ && is_running_in_vm_) {
    // Leaf 0x40000000 EBX/ECX/EDX hold the hypervisor vendor signature.
    int cpu_info_hv[4] = {};
    __cpuid(cpu_info_hv, 0x40000000);
    if (cpu_info_hv[1] == 0x7263694D &&  // Micr
        cpu_info_hv[2] == 0x666F736F &&  // osof
        cpu_info_hv[3] == 0x76482074) {  // t Hv
      // If CPUID says we have a variant TSC and a hypervisor has identified
      // itself and the hypervisor says it is Microsoft Hyper-V, then treat
      // TSC as invariant.
      //
      // Microsoft Hyper-V hypervisor reports variant TSC as there are some
      // scenarios (eg. VM live migration) where the TSC is variant, but for
      // our purposes we can treat it as invariant.
      has_non_stop_time_stamp_counter_ = true;
    }
  }
#elif defined(ARCH_CPU_ARM_FAMILY)
#if (defined(OS_ANDROID) || defined(OS_LINUX))
  cpu_brand_ = *CpuInfoBrand();
#elif defined(OS_WIN)
  // Windows makes high-resolution thread timing information available in
  // user-space.
  has_non_stop_time_stamp_counter_ = true;
#endif
#endif
}
+
+CPU::IntelMicroArchitecture CPU::GetIntelMicroArchitecture() const {
+ if (has_avx2()) return AVX2;
+ if (has_avx()) return AVX;
+ if (has_sse42()) return SSE42;
+ if (has_sse41()) return SSE41;
+ if (has_ssse3()) return SSSE3;
+ if (has_sse3()) return SSE3;
+ if (has_sse2()) return SSE2;
+ if (has_sse()) return SSE;
+ return PENTIUM;
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/cpu.h b/security/sandbox/chromium/base/cpu.h
new file mode 100644
index 0000000000..4fcda6904f
--- /dev/null
+++ b/security/sandbox/chromium/base/cpu.h
@@ -0,0 +1,104 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CPU_H_
+#define BASE_CPU_H_
+
+#include <string>
+#include <tuple>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if defined(ARCH_CPU_X86_FAMILY)
+namespace internal {
+
+// Compute the CPU family and model based on the vendor and CPUID signature.
+// Returns in order: family, model, extended family, extended model.
+BASE_EXPORT std::tuple<int, int, int, int> ComputeX86FamilyAndModel(
+ const std::string& vendor,
+ int signature);
+
+} // namespace internal
+#endif // defined(ARCH_CPU_X86_FAMILY)
+
// Query information about the processor.
class BASE_EXPORT CPU final {
 public:
  // Runs feature detection once at construction (CPUID on x86); the
  // accessors below are cheap reads of the cached results.
  CPU();

  // Instruction-set tiers in increasing order of capability; see
  // GetIntelMicroArchitecture().
  enum IntelMicroArchitecture {
    PENTIUM,
    SSE,
    SSE2,
    SSE3,
    SSSE3,
    SSE41,
    SSE42,
    AVX,
    AVX2,
    MAX_INTEL_MICRO_ARCHITECTURE
  };

  // Accessors for CPU information.
  const std::string& vendor_name() const { return cpu_vendor_; }
  int signature() const { return signature_; }
  int stepping() const { return stepping_; }
  int model() const { return model_; }
  int family() const { return family_; }
  int type() const { return type_; }
  int extended_model() const { return ext_model_; }
  int extended_family() const { return ext_family_; }
  bool has_mmx() const { return has_mmx_; }
  bool has_sse() const { return has_sse_; }
  bool has_sse2() const { return has_sse2_; }
  bool has_sse3() const { return has_sse3_; }
  bool has_ssse3() const { return has_ssse3_; }
  bool has_sse41() const { return has_sse41_; }
  bool has_sse42() const { return has_sse42_; }
  bool has_popcnt() const { return has_popcnt_; }
  bool has_avx() const { return has_avx_; }
  bool has_avx2() const { return has_avx2_; }
  bool has_aesni() const { return has_aesni_; }
  bool has_non_stop_time_stamp_counter() const {
    return has_non_stop_time_stamp_counter_;
  }
  bool is_running_in_vm() const { return is_running_in_vm_; }

  // Returns the newest IntelMicroArchitecture tier supported by this CPU.
  IntelMicroArchitecture GetIntelMicroArchitecture() const;
  const std::string& cpu_brand() const { return cpu_brand_; }

 private:
  // Query the processor for CPUID information.
  void Initialize();

  int signature_;  // raw form of type, family, model, and stepping
  int type_;  // process type
  int family_;  // family of the processor
  int model_;  // model of processor
  int stepping_;  // processor revision number
  int ext_model_;
  int ext_family_;
  bool has_mmx_;
  bool has_sse_;
  bool has_sse2_;
  bool has_sse3_;
  bool has_ssse3_;
  bool has_sse41_;
  bool has_sse42_;
  bool has_popcnt_;
  bool has_avx_;
  bool has_avx2_;
  bool has_aesni_;
  bool has_non_stop_time_stamp_counter_;
  bool is_running_in_vm_;
  std::string cpu_vendor_;
  std::string cpu_brand_;
};
+
+} // namespace base
+
+#endif // BASE_CPU_H_
diff --git a/security/sandbox/chromium/base/debug/alias.cc b/security/sandbox/chromium/base/debug/alias.cc
new file mode 100644
index 0000000000..f808c50345
--- /dev/null
+++ b/security/sandbox/chromium/base/debug/alias.cc
@@ -0,0 +1,16 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/alias.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace debug {
+
// This file/function should be excluded from LTO/LTCG to ensure that the
// compiler can't see this function's implementation when compiling calls to it.
//
// The empty body is the point: with the definition opaque at call sites, the
// optimizer must assume |var| escapes, keeping the pointed-at local alive in
// the caller's frame (and therefore present in crash dumps).
NOINLINE void Alias(const void* var) {}
+
+} // namespace debug
+} // namespace base
diff --git a/security/sandbox/chromium/base/debug/alias.h b/security/sandbox/chromium/base/debug/alias.h
new file mode 100644
index 0000000000..35af2c23bf
--- /dev/null
+++ b/security/sandbox/chromium/base/debug/alias.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_ALIAS_H_
+#define BASE_DEBUG_ALIAS_H_
+
+#include "base/base_export.h"
+#include "base/stl_util.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+namespace debug {
+
+// Make the optimizer think that var is aliased. This is to prevent it from
+// optimizing out local variables that would not otherwise be live at the point
+// of a potential crash.
+// base::debug::Alias should only be used for local variables, not globals,
+// object members, or function return values - these must be copied to locals if
+// you want to ensure they are recorded in crash dumps.
+// Note that if the local variable is a pointer then its value will be retained
+// but the memory that it points to will probably not be saved in the crash
+// dump - by default only stack memory is saved. Therefore the aliasing
+// technique is usually only worthwhile with non-pointer variables. If you have
+// a pointer to an object and you want to retain the object's state you need to
+// copy the object or its fields to local variables. Example usage:
+// int last_error = err_;
+// base::debug::Alias(&last_error);
+// DEBUG_ALIAS_FOR_CSTR(name_copy, p->name, 16);
+// CHECK(false);
+void BASE_EXPORT Alias(const void* var);
+
+} // namespace debug
+} // namespace base
+
// Convenience macro that copies the null-terminated string from |c_str| into a
// stack-allocated char array named |var_name| that holds up to |char_count|
// characters and should be preserved in memory dumps.
//
// Deliberately not wrapped in do { ... } while (0): |var_name| must stay in
// scope in the caller so the array remains live (and aliased) up to the
// crash point. base::strlcpy presumably truncates and NUL-terminates like
// BSD strlcpy — verify against base/strings/string_util.h.
#define DEBUG_ALIAS_FOR_CSTR(var_name, c_str, char_count)  \
  char var_name[char_count];                               \
  ::base::strlcpy(var_name, (c_str), base::size(var_name)); \
  ::base::debug::Alias(var_name);
+
+#endif // BASE_DEBUG_ALIAS_H_
diff --git a/security/sandbox/chromium/base/debug/crash_logging.h b/security/sandbox/chromium/base/debug/crash_logging.h
new file mode 100644
index 0000000000..9c6cd758da
--- /dev/null
+++ b/security/sandbox/chromium/base/debug/crash_logging.h
@@ -0,0 +1,104 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_CRASH_LOGGING_H_
+#define BASE_DEBUG_CRASH_LOGGING_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+namespace debug {
+
+// A crash key is an annotation that is carried along with a crash report, to
+// provide additional debugging information beyond a stack trace. Crash keys
+// have a name and a string value.
+//
+// The preferred API is //components/crash/core/common:crash_key, however not
+// all clients can hold a direct dependency on that target. The API provided
+// in this file indirects the dependency.
+//
+// Example usage:
+// static CrashKeyString* crash_key =
+// AllocateCrashKeyString("name", CrashKeySize::Size32);
+// SetCrashKeyString(crash_key, "value");
+// ClearCrashKeyString(crash_key);
+
+// The maximum length for a crash key's value must be one of the following
+// pre-determined values.
+enum class CrashKeySize {
+ Size32 = 32,
+ Size64 = 64,
+ Size256 = 256,
+};
+
+struct CrashKeyString;
+
+// Allocates a new crash key with the specified |name| with storage for a
+// value up to length |size|. This will return null if the crash key system is
+// not initialized.
+BASE_EXPORT CrashKeyString* AllocateCrashKeyString(const char name[],
+ CrashKeySize size);
+
+// Stores |value| into the specified |crash_key|. The |crash_key| may be null
+// if AllocateCrashKeyString() returned null. If |value| is longer than the
+// size with which the key was allocated, it will be truncated.
+BASE_EXPORT void SetCrashKeyString(CrashKeyString* crash_key,
+ base::StringPiece value);
+
+// Clears any value that was stored in |crash_key|. The |crash_key| may be
+// null.
+BASE_EXPORT void ClearCrashKeyString(CrashKeyString* crash_key);
+
+// A scoper that sets the specified key to value for the lifetime of the
+// object, and clears it on destruction.
+class BASE_EXPORT ScopedCrashKeyString {
+ public:
+ ScopedCrashKeyString(CrashKeyString* crash_key, base::StringPiece value);
+ ~ScopedCrashKeyString();
+
+ private:
+ CrashKeyString* const crash_key_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedCrashKeyString);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// The following declarations are used to initialize the crash key system
+// in //base by providing implementations for the above functions.
+
+// The virtual interface that provides the implementation for the crash key
+// API. This is implemented by a higher-layer component, and the instance is
+// set using the function below.
+class CrashKeyImplementation {
+ public:
+ virtual ~CrashKeyImplementation() = default;
+
+ virtual CrashKeyString* Allocate(const char name[], CrashKeySize size) = 0;
+ virtual void Set(CrashKeyString* crash_key, base::StringPiece value) = 0;
+ virtual void Clear(CrashKeyString* crash_key) = 0;
+};
+
+// Initializes the crash key system in base by replacing the existing
+// implementation, if it exists, with |impl|. The |impl| is copied into base.
+BASE_EXPORT void SetCrashKeyImplementation(
+ std::unique_ptr<CrashKeyImplementation> impl);
+
// The base structure for a crash key, storing the allocation metadata.
// |name| is held as a raw pointer, so the string passed to the constructor
// must outlive this object — NOTE(review): presumably a string literal in
// practice; confirm against AllocateCrashKeyString() implementations.
struct CrashKeyString {
  constexpr CrashKeyString(const char name[], CrashKeySize size)
      : name(name), size(size) {}
  const char* const name;
  const CrashKeySize size;
};
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_CRASH_LOGGING_H_
diff --git a/security/sandbox/chromium/base/debug/debugger.h b/security/sandbox/chromium/base/debug/debugger.h
new file mode 100644
index 0000000000..efc9b40cb9
--- /dev/null
+++ b/security/sandbox/chromium/base/debug/debugger.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a cross platform interface for helper functions related to
+// debuggers. You should use this to test if you're running under a debugger,
+// and if you would like to yield (breakpoint) into the debugger.
+
+#ifndef BASE_DEBUG_DEBUGGER_H_
+#define BASE_DEBUG_DEBUGGER_H_
+
+#include "base/base_export.h"
+
+namespace base {
+namespace debug {
+
+// Waits wait_seconds seconds for a debugger to attach to the current process.
+// When silent is false, an exception is thrown when a debugger is detected.
+BASE_EXPORT bool WaitForDebugger(int wait_seconds, bool silent);
+
+// Returns true if the given process is being run under a debugger.
+//
+// On OS X, the underlying mechanism doesn't work when the sandbox is enabled.
+// To get around this, this function caches its value.
+//
+// WARNING: Because of this, on OS X, a call MUST be made to this function
+// BEFORE the sandbox is enabled.
+BASE_EXPORT bool BeingDebugged();
+
+// Break into the debugger, assumes a debugger is present.
+BASE_EXPORT void BreakDebugger();
+
+// Used in test code, this controls whether showing dialogs and breaking into
+// the debugger is suppressed for debug errors, even in debug mode (normally
+// release mode doesn't do this stuff -- this is controlled separately).
+// Normally UI is not suppressed. This is normally used when running automated
+// tests where we want a crash rather than a dialog or a debugger.
+BASE_EXPORT void SetSuppressDebugUI(bool suppress);
+BASE_EXPORT bool IsDebugUISuppressed();
+
+// If a debugger is present, verifies that it is properly set up, and DCHECK()s
+// if misconfigured. Currently only verifies that //tools/gdb/gdbinit has been
+// sourced when using gdb on Linux and //tools/lldb/lldbinit.py has been sourced
+// when using lldb on macOS.
+BASE_EXPORT void VerifyDebugger();
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_DEBUGGER_H_
diff --git a/security/sandbox/chromium/base/debug/leak_annotations.h b/security/sandbox/chromium/base/debug/leak_annotations.h
new file mode 100644
index 0000000000..dc502461d0
--- /dev/null
+++ b/security/sandbox/chromium/base/debug/leak_annotations.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_LEAK_ANNOTATIONS_H_
+#define BASE_DEBUG_LEAK_ANNOTATIONS_H_
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+// This file defines macros which can be used to annotate intentional memory
+// leaks. Support for annotations is implemented in LeakSanitizer. Annotated
+// objects will be treated as a source of live pointers, i.e. any heap objects
+// reachable by following pointers from an annotated object will not be
+// reported as leaks.
+//
+// ANNOTATE_SCOPED_MEMORY_LEAK: all allocations made in the current scope
+// will be annotated as leaks.
+// ANNOTATE_LEAKING_OBJECT_PTR(X): the heap object referenced by pointer X will
+// be annotated as a leak.
+
+#if defined(LEAK_SANITIZER) && !defined(OS_NACL)
+
+#include <sanitizer/lsan_interface.h>
+
+class ScopedLeakSanitizerDisabler {
+ public:
+ ScopedLeakSanitizerDisabler() { __lsan_disable(); }
+ ~ScopedLeakSanitizerDisabler() { __lsan_enable(); }
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScopedLeakSanitizerDisabler);
+};
+
+#define ANNOTATE_SCOPED_MEMORY_LEAK \
+ ScopedLeakSanitizerDisabler leak_sanitizer_disabler; static_cast<void>(0)
+
+#define ANNOTATE_LEAKING_OBJECT_PTR(X) __lsan_ignore_object(X);
+
+#else
+
+#define ANNOTATE_SCOPED_MEMORY_LEAK ((void)0)
+#define ANNOTATE_LEAKING_OBJECT_PTR(X) ((void)0)
+
+#endif
+
+#endif // BASE_DEBUG_LEAK_ANNOTATIONS_H_
diff --git a/security/sandbox/chromium/base/debug/profiler.cc b/security/sandbox/chromium/base/debug/profiler.cc
new file mode 100644
index 0000000000..3530d61d07
--- /dev/null
+++ b/security/sandbox/chromium/base/debug/profiler.cc
@@ -0,0 +1,180 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/profiler.h"
+
+#include <string>
+
+#include "base/allocator/buildflags.h"
+#include "base/debug/debugging_buildflags.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/current_module.h"
+#include "base/win/pe_image.h"
+#endif // defined(OS_WIN)
+
+// TODO(peria): Enable profiling on Windows.
+#if BUILDFLAG(ENABLE_PROFILING) && BUILDFLAG(USE_TCMALLOC) && !defined(OS_WIN)
+#include "third_party/tcmalloc/chromium/src/gperftools/profiler.h"
+#endif
+
+namespace base {
+namespace debug {
+
+// TODO(peria): Enable profiling on Windows.
+#if BUILDFLAG(ENABLE_PROFILING) && BUILDFLAG(USE_TCMALLOC) && !defined(OS_WIN)
+
+static int profile_count = 0;
+
+void StartProfiling(const std::string& name) {
+ ++profile_count;
+ std::string full_name(name);
+ std::string pid = NumberToString(GetCurrentProcId());
+ std::string count = NumberToString(profile_count);
+ ReplaceSubstringsAfterOffset(&full_name, 0, "{pid}", pid);
+ ReplaceSubstringsAfterOffset(&full_name, 0, "{count}", count);
+ ProfilerStart(full_name.c_str());
+}
+
+void StopProfiling() {
+ ProfilerFlush();
+ ProfilerStop();
+}
+
+void FlushProfiling() {
+ ProfilerFlush();
+}
+
+bool BeingProfiled() {
+ return ProfilingIsEnabledForAllThreads();
+}
+
+void RestartProfilingAfterFork() {
+ ProfilerRegisterThread();
+}
+
+bool IsProfilingSupported() {
+ return true;
+}
+
+#else
+
+void StartProfiling(const std::string& name) {
+}
+
+void StopProfiling() {
+}
+
+void FlushProfiling() {
+}
+
+bool BeingProfiled() {
+ return false;
+}
+
+void RestartProfilingAfterFork() {
+}
+
+bool IsProfilingSupported() {
+ return false;
+}
+
+#endif
+
+#if !defined(OS_WIN)
+
+ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
+ return nullptr;
+}
+
+AddDynamicSymbol GetProfilerAddDynamicSymbolFunc() {
+ return nullptr;
+}
+
+MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
+ return nullptr;
+}
+
+#else // defined(OS_WIN)
+
+namespace {
+
+struct FunctionSearchContext {
+ const char* name;
+ FARPROC function;
+};
+
+// Callback function to PEImage::EnumImportChunks.
+bool FindResolutionFunctionInImports(
+ const base::win::PEImage &image, const char* module_name,
+ PIMAGE_THUNK_DATA unused_name_table, PIMAGE_THUNK_DATA import_address_table,
+ PVOID cookie) {
+ FunctionSearchContext* context =
+ reinterpret_cast<FunctionSearchContext*>(cookie);
+
+ DCHECK(context);
+ DCHECK(!context->function);
+
+ // Our import address table contains pointers to the functions we import
+ // at this point. Let's retrieve the first such function and use it to
+ // find the module this import was resolved to by the loader.
+ const wchar_t* function_in_module =
+ reinterpret_cast<const wchar_t*>(import_address_table->u1.Function);
+
+ // Retrieve the module by a function in the module.
+ const DWORD kFlags = GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT;
+ HMODULE module = NULL;
+ if (!::GetModuleHandleEx(kFlags, function_in_module, &module)) {
+ // This can happen if someone IAT patches us to a thunk.
+ return true;
+ }
+
+ // See whether this module exports the function we're looking for.
+ FARPROC exported_func = ::GetProcAddress(module, context->name);
+ if (exported_func != NULL) {
+ // We found it, return the function and terminate the enumeration.
+ context->function = exported_func;
+ return false;
+ }
+
+ // Keep going.
+ return true;
+}
+
+template <typename FunctionType>
+FunctionType FindFunctionInImports(const char* function_name) {
+ base::win::PEImage image(CURRENT_MODULE());
+
+ FunctionSearchContext ctx = { function_name, NULL };
+ image.EnumImportChunks(FindResolutionFunctionInImports, &ctx, nullptr);
+
+ return reinterpret_cast<FunctionType>(ctx.function);
+}
+
+} // namespace
+
+ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
+ return FindFunctionInImports<ReturnAddressLocationResolver>(
+ "ResolveReturnAddressLocation");
+}
+
+AddDynamicSymbol GetProfilerAddDynamicSymbolFunc() {
+ return FindFunctionInImports<AddDynamicSymbol>(
+ "AddDynamicSymbol");
+}
+
+MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
+ return FindFunctionInImports<MoveDynamicSymbol>(
+ "MoveDynamicSymbol");
+}
+
+#endif // defined(OS_WIN)
+
+} // namespace debug
+} // namespace base
diff --git a/security/sandbox/chromium/base/debug/profiler.h b/security/sandbox/chromium/base/debug/profiler.h
new file mode 100644
index 0000000000..1229e06234
--- /dev/null
+++ b/security/sandbox/chromium/base/debug/profiler.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_PROFILER_H_
+#define BASE_DEBUG_PROFILER_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+// The Profiler functions allow usage of the underlying sampling based
+// profiler. If the application has not been built with the necessary
+// flags (-DENABLE_PROFILING and not -DNO_TCMALLOC) then these functions
+// are noops.
+namespace base {
+namespace debug {
+
+// Start profiling with the supplied name.
+// {pid} will be replaced by the process' pid and {count} will be replaced
+// by the count of the profile run (starts at 1 with each process).
+BASE_EXPORT void StartProfiling(const std::string& name);
+
+// Stop profiling and write out data.
+BASE_EXPORT void StopProfiling();
+
+// Force data to be written to file.
+BASE_EXPORT void FlushProfiling();
+
+// Returns true if process is being profiled.
+BASE_EXPORT bool BeingProfiled();
+
+// Reset profiling after a fork, which disables timers.
+BASE_EXPORT void RestartProfilingAfterFork();
+
+// Returns true iff this executable supports profiling.
+BASE_EXPORT bool IsProfilingSupported();
+
+// There's a class of profilers that use "return address swizzling" to get a
+// hook on function exits. This class of profilers uses some form of entry hook,
+// like e.g. binary instrumentation, or a compiler flag, that calls a hook each
+// time a function is invoked. The hook then switches the return address on the
+// stack for the address of an exit hook function, and pushes the original
+// return address to a shadow stack of some type. When in due course the CPU
+// executes a return to the exit hook, the exit hook will do whatever work it
+// does on function exit, then arrange to return to the original return address.
+// This class of profiler does not play well with programs that look at the
+// return address, as does e.g. V8. V8 uses the return address to certain
+// runtime functions to find the JIT code that called it, and from there finds
+// the V8 data structures associated to the JS function involved.
+// A return address resolution function is used to fix this. It allows such
+// programs to resolve a location on stack where a return address originally
+// resided, to the shadow stack location where the profiler stashed it.
+typedef uintptr_t (*ReturnAddressLocationResolver)(
+ uintptr_t return_addr_location);
+
+typedef void (*AddDynamicSymbol)(const void* address,
+ size_t length,
+ const char* name,
+ size_t name_len);
+typedef void (*MoveDynamicSymbol)(const void* address, const void* new_address);
+
+
+// If this binary is instrumented and the instrumentation supplies a function
+// for each of those purposes, find and return the function in question.
+// Otherwise returns NULL.
+BASE_EXPORT ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc();
+BASE_EXPORT AddDynamicSymbol GetProfilerAddDynamicSymbolFunc();
+BASE_EXPORT MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc();
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_PROFILER_H_
diff --git a/security/sandbox/chromium/base/environment.cc b/security/sandbox/chromium/base/environment.cc
new file mode 100644
index 0000000000..8e3afd8983
--- /dev/null
+++ b/security/sandbox/chromium/base/environment.cc
@@ -0,0 +1,123 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/environment.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <stdlib.h>
+#endif
+
+namespace base {
+
+namespace {
+
+class EnvironmentImpl : public Environment {
+ public:
+ bool GetVar(StringPiece variable_name, std::string* result) override {
+ if (GetVarImpl(variable_name, result))
+ return true;
+
+ // Some commonly used variable names are uppercase while others
+ // are lowercase, which is inconsistent. Let's try to be helpful
+ // and look for a variable name with the reverse case.
+ // I.e. HTTP_PROXY may be http_proxy for some users/systems.
+ char first_char = variable_name[0];
+ std::string alternate_case_var;
+ if (IsAsciiLower(first_char))
+ alternate_case_var = ToUpperASCII(variable_name);
+ else if (IsAsciiUpper(first_char))
+ alternate_case_var = ToLowerASCII(variable_name);
+ else
+ return false;
+ return GetVarImpl(alternate_case_var, result);
+ }
+
+ bool SetVar(StringPiece variable_name,
+ const std::string& new_value) override {
+ return SetVarImpl(variable_name, new_value);
+ }
+
+ bool UnSetVar(StringPiece variable_name) override {
+ return UnSetVarImpl(variable_name);
+ }
+
+ private:
+ bool GetVarImpl(StringPiece variable_name, std::string* result) {
+#if defined(OS_WIN)
+ DWORD value_length =
+ ::GetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr, 0);
+ if (value_length == 0)
+ return false;
+ if (result) {
+ std::unique_ptr<wchar_t[]> value(new wchar_t[value_length]);
+ ::GetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), value.get(),
+ value_length);
+ *result = WideToUTF8(value.get());
+ }
+ return true;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ const char* env_value = getenv(variable_name.data());
+ if (!env_value)
+ return false;
+ // Note that the variable may be defined but empty.
+ if (result)
+ *result = env_value;
+ return true;
+#endif
+ }
+
+ bool SetVarImpl(StringPiece variable_name, const std::string& new_value) {
+#if defined(OS_WIN)
+ // On success, a nonzero value is returned.
+ return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(),
+ UTF8ToWide(new_value).c_str());
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ // On success, zero is returned.
+ return !setenv(variable_name.data(), new_value.c_str(), 1);
+#endif
+ }
+
+ bool UnSetVarImpl(StringPiece variable_name) {
+#if defined(OS_WIN)
+ // On success, a nonzero value is returned.
+ return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ // On success, zero is returned.
+ return !unsetenv(variable_name.data());
+#endif
+ }
+};
+
+} // namespace
+
+namespace env_vars {
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+// On Posix systems, this variable contains the location of the user's home
+// directory. (e.g, /home/username/).
+const char kHome[] = "HOME";
+#endif
+
+} // namespace env_vars
+
+Environment::~Environment() = default;
+
+// static
+std::unique_ptr<Environment> Environment::Create() {
+ return std::make_unique<EnvironmentImpl>();
+}
+
+bool Environment::HasVar(StringPiece variable_name) {
+ return GetVar(variable_name, nullptr);
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/environment.h b/security/sandbox/chromium/base/environment.h
new file mode 100644
index 0000000000..79529a2131
--- /dev/null
+++ b/security/sandbox/chromium/base/environment.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ENVIRONMENT_H_
+#define BASE_ENVIRONMENT_H_
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace env_vars {
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+BASE_EXPORT extern const char kHome[];
+#endif
+
+} // namespace env_vars
+
+class BASE_EXPORT Environment {
+ public:
+ virtual ~Environment();
+
+ // Returns the appropriate platform-specific instance.
+ static std::unique_ptr<Environment> Create();
+
+ // Gets an environment variable's value and stores it in |result|.
+ // Returns false if the key is unset.
+ virtual bool GetVar(StringPiece variable_name, std::string* result) = 0;
+
+ // Syntactic sugar for GetVar(variable_name, nullptr);
+ virtual bool HasVar(StringPiece variable_name);
+
+ // Returns true on success, otherwise returns false. This method should not
+ // be called in a multi-threaded process.
+ virtual bool SetVar(StringPiece variable_name,
+ const std::string& new_value) = 0;
+
+ // Returns true on success, otherwise returns false. This method should not
+ // be called in a multi-threaded process.
+ virtual bool UnSetVar(StringPiece variable_name) = 0;
+};
+
+#if defined(OS_WIN)
+using NativeEnvironmentString = std::wstring;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+using NativeEnvironmentString = std::string;
+#endif
+using EnvironmentMap =
+ std::map<NativeEnvironmentString, NativeEnvironmentString>;
+
+} // namespace base
+
+#endif // BASE_ENVIRONMENT_H_
diff --git a/security/sandbox/chromium/base/file_descriptor_posix.h b/security/sandbox/chromium/base/file_descriptor_posix.h
new file mode 100644
index 0000000000..cb32dde355
--- /dev/null
+++ b/security/sandbox/chromium/base/file_descriptor_posix.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILE_DESCRIPTOR_POSIX_H_
+#define BASE_FILE_DESCRIPTOR_POSIX_H_
+
+#include "base/files/file.h"
+#include "base/files/scoped_file.h"
+
+namespace base {
+
+constexpr int kInvalidFd = -1;
+
+// -----------------------------------------------------------------------------
+// We introduce a special structure for file descriptors in order that we are
+// able to use template specialisation to special-case their handling.
+//
+// IMPORTANT: This is primarily intended for use when sending file descriptors
+// over IPC. Even if |auto_close| is true, base::FileDescriptor does NOT close()
+// |fd| when going out of scope. Instead, a consumer of a base::FileDescriptor
+// must invoke close() on |fd| if |auto_close| is true.
+//
+// In the case of IPC, the IPC subsystem knows to close() |fd| after sending
+// a message that contains a base::FileDescriptor if auto_close == true. On the
+// other end, the receiver must make sure to close() |fd| after it has finished
+// processing the IPC message. See the IPC::ParamTraits<> specialization in
+// ipc/ipc_message_utils.h for all the details.
+// -----------------------------------------------------------------------------
+struct FileDescriptor {
+ FileDescriptor() : fd(kInvalidFd), auto_close(false) {}
+
+ FileDescriptor(int ifd, bool iauto_close) : fd(ifd), auto_close(iauto_close) {
+ }
+
+ FileDescriptor(File file) : fd(file.TakePlatformFile()), auto_close(true) {}
+ explicit FileDescriptor(ScopedFD fd) : fd(fd.release()), auto_close(true) {}
+
+ bool operator==(const FileDescriptor& other) const {
+ return (fd == other.fd && auto_close == other.auto_close);
+ }
+
+ bool operator!=(const FileDescriptor& other) const {
+ return !operator==(other);
+ }
+
+ // A comparison operator so that we can use these as keys in a std::map.
+ bool operator<(const FileDescriptor& other) const {
+ return other.fd < fd;
+ }
+
+ int fd;
+ // If true, this file descriptor should be closed after it has been used. For
+ // example an IPC system might interpret this flag as indicating that the
+ // file descriptor it has been given should be closed after use.
+ bool auto_close;
+};
+
+} // namespace base
+
+#endif // BASE_FILE_DESCRIPTOR_POSIX_H_
diff --git a/security/sandbox/chromium/base/files/file_path.h b/security/sandbox/chromium/base/files/file_path.h
new file mode 100644
index 0000000000..4e23f71a92
--- /dev/null
+++ b/security/sandbox/chromium/base/files/file_path.h
@@ -0,0 +1,484 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// FilePath is a container for pathnames stored in a platform's native string
+// type, providing containers for manipulation in accordance with the
+// platform's conventions for pathnames. It supports the following path
+// types:
+//
+// POSIX Windows
+// --------------- ----------------------------------
+// Fundamental type char[] wchar_t[]
+// Encoding unspecified* UTF-16
+// Separator / \, tolerant of /
+// Drive letters no case-insensitive A-Z followed by :
+// Alternate root // (surprise!) \\, for UNC paths
+//
+// * The encoding need not be specified on POSIX systems, although some
+// POSIX-compliant systems do specify an encoding. Mac OS X uses UTF-8.
+// Chrome OS also uses UTF-8.
+// Linux does not specify an encoding, but in practice, the locale's
+// character set may be used.
+//
+// For more arcane bits of path trivia, see below.
+//
+// FilePath objects are intended to be used anywhere paths are. An
+// application may pass FilePath objects around internally, masking the
+// underlying differences between systems, only differing in implementation
+// where interfacing directly with the system. For example, a single
+// OpenFile(const FilePath &) function may be made available, allowing all
+// callers to operate without regard to the underlying implementation. On
+// POSIX-like platforms, OpenFile might wrap fopen, and on Windows, it might
+// wrap _wfopen_s, perhaps both by calling file_path.value().c_str(). This
+// allows each platform to pass pathnames around without requiring conversions
+// between encodings, which has an impact on performance, but more importantly,
+// has an impact on correctness on platforms that do not have well-defined
+// encodings for pathnames.
+//
+// Several methods are available to perform common operations on a FilePath
+// object, such as determining the parent directory (DirName), isolating the
+// final path component (BaseName), and appending a relative pathname string
+// to an existing FilePath object (Append). These methods are highly
+// recommended over attempting to split and concatenate strings directly.
+// These methods are based purely on string manipulation and knowledge of
+// platform-specific pathname conventions, and do not consult the filesystem
+// at all, making them safe to use without fear of blocking on I/O operations.
+// These methods do not function as mutators but instead return distinct
+// instances of FilePath objects, and are therefore safe to use on const
+// objects. The objects themselves are safe to share between threads.
+//
+// To aid in initialization of FilePath objects from string literals, a
+// FILE_PATH_LITERAL macro is provided, which accounts for the difference
+// between char[]-based pathnames on POSIX systems and wchar_t[]-based
+// pathnames on Windows.
+//
+// As a precaution against premature truncation, paths can't contain NULs.
+//
+// Because a FilePath object should not be instantiated at the global scope,
+// instead, use a FilePath::CharType[] and initialize it with
+// FILE_PATH_LITERAL. At runtime, a FilePath object can be created from the
+// character array. Example:
+//
+// | const FilePath::CharType kLogFileName[] = FILE_PATH_LITERAL("log.txt");
+// |
+// | void Function() {
+// | FilePath log_file_path(kLogFileName);
+// | [...]
+// | }
+//
+// WARNING: FilePaths should ALWAYS be displayed with LTR directionality, even
+// when the UI language is RTL. This means you always need to pass filepaths
+// through base::i18n::WrapPathWithLTRFormatting() before displaying it in the
+// RTL UI.
+//
+// This is a very common source of bugs, please try to keep this in mind.
+//
+// ARCANE BITS OF PATH TRIVIA
+//
+// - A double leading slash is actually part of the POSIX standard. Systems
+// are allowed to treat // as an alternate root, as Windows does for UNC
+// (network share) paths. Most POSIX systems don't do anything special
+// with two leading slashes, but FilePath handles this case properly
+// in case it ever comes across such a system. FilePath needs this support
+// for Windows UNC paths, anyway.
+// References:
+// The Open Group Base Specifications Issue 7, sections 3.267 ("Pathname")
+// and 4.12 ("Pathname Resolution"), available at:
+// http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_267
+// http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_12
+//
+// - Windows treats c:\\ the same way it treats \\. This was intended to
+// allow older applications that require drive letters to support UNC paths
+// like \\server\share\path, by permitting c:\\server\share\path as an
+// equivalent. Since the OS treats these paths specially, FilePath needs
+// to do the same. Since Windows can use either / or \ as the separator,
+// FilePath treats c://, c:\\, //, and \\ all equivalently.
+// Reference:
+// The Old New Thing, "Why is a drive letter permitted in front of UNC
+// paths (sometimes)?", available at:
+// http://blogs.msdn.com/oldnewthing/archive/2005/11/22/495740.aspx
+
+#ifndef BASE_FILES_FILE_PATH_H_
+#define BASE_FILES_FILE_PATH_H_
+
+#include <stddef.h>
+
+#include <functional>
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/stl_util.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+// Windows-style drive letter support and pathname separator characters can be
+// enabled and disabled independently, to aid testing. These #defines are
+// here so that the same setting can be used in both the implementation and
+// in the unit test.
+#if defined(OS_WIN)
+#define FILE_PATH_USES_DRIVE_LETTERS
+#define FILE_PATH_USES_WIN_SEPARATORS
+#endif // OS_WIN
+
+// To print path names portably use PRFilePath (based on PRIuS and friends from
+// C99 and format_macros.h) like this:
+// base::StringPrintf("Path is %" PRFilePath ".\n", path.value().c_str());
+#if defined(OS_WIN)
+#define PRFilePath "ls"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#define PRFilePath "s"
+#endif // OS_WIN
+
+// Macros for string literal initialization of FilePath::CharType[].
+#if defined(OS_WIN)
+#define FILE_PATH_LITERAL(x) L##x
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#define FILE_PATH_LITERAL(x) x
+#endif // OS_WIN
+
+namespace base {
+
+class Pickle;
+class PickleIterator;
+
+// An abstraction to isolate users from the differences between native
+// pathnames on different platforms.
+class BASE_EXPORT FilePath {
+ public:
+#if defined(OS_WIN)
+ // On Windows, for Unicode-aware applications, native pathnames are wchar_t
+ // arrays encoded in UTF-16.
+ typedef std::wstring StringType;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ // On most platforms, native pathnames are char arrays, and the encoding
+ // may or may not be specified. On Mac OS X, native pathnames are encoded
+ // in UTF-8.
+ typedef std::string StringType;
+#endif // OS_WIN
+
+ typedef BasicStringPiece<StringType> StringPieceType;
+ typedef StringType::value_type CharType;
+
+ // Null-terminated array of separators used to separate components in
+ // hierarchical paths. Each character in this array is a valid separator,
+ // but kSeparators[0] is treated as the canonical separator and will be used
+ // when composing pathnames.
+ static const CharType kSeparators[];
+
+ // base::size(kSeparators).
+ static const size_t kSeparatorsLength;
+
+ // A special path component meaning "this directory."
+ static const CharType kCurrentDirectory[];
+
+ // A special path component meaning "the parent directory."
+ static const CharType kParentDirectory[];
+
+ // The character used to identify a file extension.
+ static const CharType kExtensionSeparator;
+
+ FilePath();
+ FilePath(const FilePath& that);
+ explicit FilePath(StringPieceType path);
+ ~FilePath();
+ FilePath& operator=(const FilePath& that);
+
+ // Constructs FilePath with the contents of |that|, which is left in valid but
+ // unspecified state.
+ FilePath(FilePath&& that) noexcept;
+ // Replaces the contents with those of |that|, which is left in valid but
+ // unspecified state.
+ FilePath& operator=(FilePath&& that);
+
+ bool operator==(const FilePath& that) const;
+
+ bool operator!=(const FilePath& that) const;
+
+ // Required for some STL containers and operations
+ bool operator<(const FilePath& that) const {
+ return path_ < that.path_;
+ }
+
+ const StringType& value() const { return path_; }
+
+ bool empty() const { return path_.empty(); }
+
+ void clear() { path_.clear(); }
+
+ // Returns true if |character| is in kSeparators.
+ static bool IsSeparator(CharType character);
+
+ // Returns a vector of all of the components of the provided path. It is
+ // equivalent to calling DirName().value() on the path's root component,
+ // and BaseName().value() on each child component.
+ //
+ // To make sure this is lossless so we can differentiate absolute and
+ // relative paths, the root slash will be included even though no other
+ // slashes will be. The precise behavior is:
+ //
+ // Posix: "/foo/bar" -> [ "/", "foo", "bar" ]
+ // Windows: "C:\foo\bar" -> [ "C:", "\\", "foo", "bar" ]
+ void GetComponents(std::vector<FilePath::StringType>* components) const;
+
+ // Returns true if this FilePath is a parent or ancestor of the |child|.
+ // Absolute and relative paths are accepted i.e. /foo is a parent to /foo/bar,
+ // and foo is a parent to foo/bar. Any ancestor is considered a parent i.e. /a
+ // is a parent to both /a/b and /a/b/c. Does not convert paths to absolute,
+ // follow symlinks or directory navigation (e.g. ".."). A path is *NOT* its
+ // own parent.
+ bool IsParent(const FilePath& child) const;
+
+ // If IsParent(child) holds, appends to path (if non-NULL) the
+ // relative path to child and returns true. For example, if parent
+ // holds "/Users/johndoe/Library/Application Support", child holds
+ // "/Users/johndoe/Library/Application Support/Google/Chrome/Default", and
+ // *path holds "/Users/johndoe/Library/Caches", then after
+ // parent.AppendRelativePath(child, path) is called *path will hold
+ // "/Users/johndoe/Library/Caches/Google/Chrome/Default". Otherwise,
+ // returns false.
+ bool AppendRelativePath(const FilePath& child, FilePath* path) const;
+
+ // Returns a FilePath corresponding to the directory containing the path
+ // named by this object, stripping away the file component. If this object
+ // only contains one component, returns a FilePath identifying
+ // kCurrentDirectory. If this object already refers to the root directory,
+ // returns a FilePath identifying the root directory. Please note that this
+ // doesn't resolve directory navigation, e.g. the result for "../a" is "..".
+ FilePath DirName() const WARN_UNUSED_RESULT;
+
+ // Returns a FilePath corresponding to the last path component of this
+ // object, either a file or a directory. If this object already refers to
+ // the root directory, returns a FilePath identifying the root directory;
+ // this is the only situation in which BaseName will return an absolute path.
+ FilePath BaseName() const WARN_UNUSED_RESULT;
+
+ // Returns ".jpg" for path "C:\pics\jojo.jpg", or an empty string if
+ // the file has no extension. If non-empty, Extension() will always start
+ // with precisely one ".". The following code should always work regardless
+ // of the value of path. For common double-extensions like .tar.gz and
+ // .user.js, this method returns the combined extension. For a single
+ // component, use FinalExtension().
+ // new_path = path.RemoveExtension().value().append(path.Extension());
+ // ASSERT(new_path == path.value());
+ // NOTE: this is different from the original file_util implementation which
+ // returned the extension without a leading "." ("jpg" instead of ".jpg")
+ StringType Extension() const WARN_UNUSED_RESULT;
+
+ // Returns the path's file extension, as in Extension(), but will
+ // never return a double extension.
+ //
+ // TODO(davidben): Check all our extension-sensitive code to see if
+ // we can rename this to Extension() and the other to something like
+ // LongExtension(), defaulting to short extensions and leaving the
+ // long "extensions" to logic like base::GetUniquePathNumber().
+ StringType FinalExtension() const WARN_UNUSED_RESULT;
+
+ // Returns "C:\pics\jojo" for path "C:\pics\jojo.jpg"
+ // NOTE: this is slightly different from the similar file_util implementation
+ // which returned simply 'jojo'.
+ FilePath RemoveExtension() const WARN_UNUSED_RESULT;
+
+ // Removes the path's file extension, as in RemoveExtension(), but
+ // ignores double extensions.
+ FilePath RemoveFinalExtension() const WARN_UNUSED_RESULT;
+
+ // Inserts |suffix| after the file name portion of |path| but before the
+ // extension. Returns "" if BaseName() == "." or "..".
+ // Examples:
+ // path == "C:\pics\jojo.jpg" suffix == " (1)", returns "C:\pics\jojo (1).jpg"
+ // path == "jojo.jpg" suffix == " (1)", returns "jojo (1).jpg"
+ // path == "C:\pics\jojo" suffix == " (1)", returns "C:\pics\jojo (1)"
+ // path == "C:\pics.old\jojo" suffix == " (1)", returns "C:\pics.old\jojo (1)"
+ FilePath InsertBeforeExtension(
+ StringPieceType suffix) const WARN_UNUSED_RESULT;
+ FilePath InsertBeforeExtensionASCII(
+ StringPiece suffix) const WARN_UNUSED_RESULT;
+
+ // Adds |extension| to |file_name|. Returns the current FilePath if
+ // |extension| is empty. Returns "" if BaseName() == "." or "..".
+ FilePath AddExtension(StringPieceType extension) const WARN_UNUSED_RESULT;
+
+ // Like above, but takes the extension as an ASCII string. See AppendASCII for
+ // details on how this is handled.
+ FilePath AddExtensionASCII(StringPiece extension) const WARN_UNUSED_RESULT;
+
+ // Replaces the extension of |file_name| with |extension|. If |file_name|
+ // does not have an extension, then |extension| is added. If |extension| is
+ // empty, then the extension is removed from |file_name|.
+ // Returns "" if BaseName() == "." or "..".
+ FilePath ReplaceExtension(StringPieceType extension) const WARN_UNUSED_RESULT;
+
+ // Returns true if the file path matches the specified extension. The test is
+ // case insensitive. Don't forget the leading period if appropriate.
+ bool MatchesExtension(StringPieceType extension) const;
+
+ // Returns a FilePath by appending a separator and the supplied path
+ // component to this object's path. Append takes care to avoid adding
+ // excessive separators if this object's path already ends with a separator.
+ // If this object's path is kCurrentDirectory, a new FilePath corresponding
+ // only to |component| is returned. |component| must be a relative path;
+ // it is an error to pass an absolute path.
+ FilePath Append(StringPieceType component) const WARN_UNUSED_RESULT;
+ FilePath Append(const FilePath& component) const WARN_UNUSED_RESULT;
+
+ // Although Windows StringType is std::wstring, since the encoding it uses for
+ // paths is well defined, it can handle ASCII path components as well.
+ // Mac uses UTF8, and since ASCII is a subset of that, it works there as well.
+ // On Linux, although it can use any 8-bit encoding for paths, we assume that
+ // ASCII is a valid subset, regardless of the encoding, since many operating
+ // system paths will always be ASCII.
+ FilePath AppendASCII(StringPiece component) const WARN_UNUSED_RESULT;
+
+ // Returns true if this FilePath contains an absolute path. On Windows, an
+ // absolute path begins with either a drive letter specification followed by
+ // a separator character, or with two separator characters. On POSIX
+ // platforms, an absolute path begins with a separator character.
+ bool IsAbsolute() const;
+
+ // Returns true if the path ends with a path separator character.
+ bool EndsWithSeparator() const WARN_UNUSED_RESULT;
+
+ // Returns a copy of this FilePath that ends with a trailing separator. If
+ // the input path is empty, an empty FilePath will be returned.
+ FilePath AsEndingWithSeparator() const WARN_UNUSED_RESULT;
+
+ // Returns a copy of this FilePath that does not end with a trailing
+ // separator.
+ FilePath StripTrailingSeparators() const WARN_UNUSED_RESULT;
+
+ // Returns true if this FilePath contains an attempt to reference a parent
+ // directory (e.g. has a path component that is "..").
+ bool ReferencesParent() const;
+
+ // Return a Unicode human-readable version of this path.
+ // Warning: you can *not*, in general, go from a display name back to a real
+ // path. Only use this when displaying paths to users, not just when you
+ // want to stuff a string16 into some other API.
+ string16 LossyDisplayName() const;
+
+ // Return the path as ASCII, or the empty string if the path is not ASCII.
+ // This should only be used for cases where the FilePath is representing a
+ // known-ASCII filename.
+ std::string MaybeAsASCII() const;
+
+ // Return the path as UTF-8.
+ //
+ // This function is *unsafe* as there is no way to tell what encoding is
+ // used in file names on POSIX systems other than Mac and Chrome OS,
+ // although UTF-8 is practically used everywhere these days. To mitigate
+ // the encoding issue, this function internally calls
+ // SysNativeMBToWide() on POSIX systems other than Mac and Chrome OS,
+ // per assumption that the current locale's encoding is used in file
+ // names, but this isn't a perfect solution.
+ //
+ // Once it becomes safe to stop caring about non-UTF-8 file names,
+ // the SysNativeMBToWide() hack will be removed from the code, along
+ // with "Unsafe" in the function name.
+ std::string AsUTF8Unsafe() const;
+
+ // Similar to AsUTF8Unsafe, but returns UTF-16 instead.
+ string16 AsUTF16Unsafe() const;
+
+ // Returns a FilePath object from a path name in UTF-8. This function
+ // should only be used for cases where you are sure that the input
+ // string is UTF-8.
+ //
+ // Like AsUTF8Unsafe(), this function is unsafe. This function
+ // internally calls SysWideToNativeMB() on POSIX systems other than Mac
+ // and Chrome OS, to mitigate the encoding issue. See the comment at
+ // AsUTF8Unsafe() for details.
+ static FilePath FromUTF8Unsafe(StringPiece utf8);
+
+ // Similar to FromUTF8Unsafe, but accepts UTF-16 instead.
+ static FilePath FromUTF16Unsafe(StringPiece16 utf16);
+
+ void WriteToPickle(Pickle* pickle) const;
+ bool ReadFromPickle(PickleIterator* iter);
+
+ // Normalize all path separators to backslash on Windows
+ // (if FILE_PATH_USES_WIN_SEPARATORS is true), or do nothing on POSIX systems.
+ FilePath NormalizePathSeparators() const;
+
+ // Normalize all path separators to the given type on Windows
+ // (if FILE_PATH_USES_WIN_SEPARATORS is true), or do nothing on POSIX systems.
+ FilePath NormalizePathSeparatorsTo(CharType separator) const;
+
+ // Compare two strings in the same way the file system does.
+ // Note that these always ignore case, even on file systems that are case-
+ // sensitive. If case-sensitive comparison is ever needed, add corresponding
+ // methods here.
+ // The methods are written as a static method so that they can also be used
+ // on parts of a file path, e.g., just the extension.
+ // CompareIgnoreCase() returns -1, 0 or 1 for less-than, equal-to and
+ // greater-than respectively.
+ static int CompareIgnoreCase(StringPieceType string1,
+ StringPieceType string2);
+ static bool CompareEqualIgnoreCase(StringPieceType string1,
+ StringPieceType string2) {
+ return CompareIgnoreCase(string1, string2) == 0;
+ }
+ static bool CompareLessIgnoreCase(StringPieceType string1,
+ StringPieceType string2) {
+ return CompareIgnoreCase(string1, string2) < 0;
+ }
+
+#if defined(OS_MACOSX)
+ // Returns the string in the special canonical decomposed form as defined for
+ // HFS, which is close to, but not quite, decomposition form D. See
+ // http://developer.apple.com/mac/library/technotes/tn/tn1150.html#UnicodeSubtleties
+ // for further comments.
+ // Returns the empty string if the conversion failed.
+ static StringType GetHFSDecomposedForm(StringPieceType string);
+
+ // Special UTF-8 version of FastUnicodeCompare. Cf:
+ // http://developer.apple.com/mac/library/technotes/tn/tn1150.html#StringComparisonAlgorithm
+ // IMPORTANT: The input strings must be in the special HFS decomposed form!
+ // (cf. above GetHFSDecomposedForm method)
+ static int HFSFastUnicodeCompare(StringPieceType string1,
+ StringPieceType string2);
+#endif
+
+#if defined(OS_ANDROID)
+ // On android, file selection dialog can return a file with content uri
+ // scheme(starting with content://). Content uri needs to be opened with
+ // ContentResolver to guarantee that the app has appropriate permissions
+ // to access it.
+ // Returns true if the path is a content uri, or false otherwise.
+ bool IsContentUri() const;
+#endif
+
+ private:
+ // Remove trailing separators from this object. If the path is absolute, it
+ // will never be stripped any more than to refer to the absolute root
+ // directory, so "////" will become "/", not "". A leading pair of
+ // separators is never stripped, to support alternate roots. This is used to
+ // support UNC paths on Windows.
+ void StripTrailingSeparatorsInternal();
+
+ StringType path_;
+};
+
+BASE_EXPORT std::ostream& operator<<(std::ostream& out,
+ const FilePath& file_path);
+
+} // namespace base
+
+namespace std {
+
+template <>
+struct hash<base::FilePath> {
+ typedef base::FilePath argument_type;
+ typedef std::size_t result_type;
+ result_type operator()(argument_type const& f) const {
+ return hash<base::FilePath::StringType>()(f.value());
+ }
+};
+
+} // namespace std
+
+#endif // BASE_FILES_FILE_PATH_H_
diff --git a/security/sandbox/chromium/base/files/file_path_constants.cc b/security/sandbox/chromium/base/files/file_path_constants.cc
new file mode 100644
index 0000000000..d2d8d31fe1
--- /dev/null
+++ b/security/sandbox/chromium/base/files/file_path_constants.cc
@@ -0,0 +1,25 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/files/file_path.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+const FilePath::CharType FilePath::kSeparators[] = FILE_PATH_LITERAL("\\/");
+#else // FILE_PATH_USES_WIN_SEPARATORS
+const FilePath::CharType FilePath::kSeparators[] = FILE_PATH_LITERAL("/");
+#endif // FILE_PATH_USES_WIN_SEPARATORS
+
+const size_t FilePath::kSeparatorsLength = base::size(kSeparators);
+
+const FilePath::CharType FilePath::kCurrentDirectory[] = FILE_PATH_LITERAL(".");
+const FilePath::CharType FilePath::kParentDirectory[] = FILE_PATH_LITERAL("..");
+
+const FilePath::CharType FilePath::kExtensionSeparator = FILE_PATH_LITERAL('.');
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/format_macros.h b/security/sandbox/chromium/base/format_macros.h
new file mode 100644
index 0000000000..1279ff7816
--- /dev/null
+++ b/security/sandbox/chromium/base/format_macros.h
@@ -0,0 +1,97 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FORMAT_MACROS_H_
+#define BASE_FORMAT_MACROS_H_
+
+// This file defines the format macros for some integer types.
+
+// To print a 64-bit value in a portable way:
+// int64_t value;
+// printf("xyz:%" PRId64, value);
+// The "d" in the macro corresponds to %d; you can also use PRIu64 etc.
+//
+// For wide strings, prepend "Wide" to the macro:
+// int64_t value;
+// StringPrintf(L"xyz: %" WidePRId64, value);
+//
+// To print a size_t value in a portable way:
+// size_t size;
+// printf("xyz: %" PRIuS, size);
+// The "u" in the macro corresponds to %u, and S is for "size".
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "build/build_config.h"
+
+#if (defined(OS_POSIX) || defined(OS_FUCHSIA)) && \
+ (defined(_INTTYPES_H) || defined(_INTTYPES_H_)) && !defined(PRId64)
+#error "inttypes.h has already been included before this header file, but "
+#error "without __STDC_FORMAT_MACROS defined."
+#endif
+
+#if (defined(OS_POSIX) || defined(OS_FUCHSIA)) && !defined(__STDC_FORMAT_MACROS)
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+
+#if defined(OS_WIN)
+
+#if !defined(PRId64) || !defined(PRIu64) || !defined(PRIx64)
+#error "inttypes.h provided by win toolchain should define these."
+#endif
+
+#define WidePRId64 L"I64d"
+#define WidePRIu64 L"I64u"
+#define WidePRIx64 L"I64x"
+
+#if !defined(PRIuS)
+#define PRIuS "Iu"
+#endif
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+// GCC will concatenate wide and narrow strings correctly, so nothing needs to
+// be done here.
+#define WidePRId64 PRId64
+#define WidePRIu64 PRIu64
+#define WidePRIx64 PRIx64
+
+#if !defined(PRIuS)
+#define PRIuS "zu"
+#endif
+
+#endif // defined(OS_WIN)
+
+// The size of NSInteger and NSUInteger varies between 32-bit and 64-bit
+// architectures and Apple does not provide standard format macros and
+// recommends casting. This has many drawbacks, so instead define macros
+// for formatting those types.
+#if defined(OS_MACOSX)
+#if defined(ARCH_CPU_64_BITS)
+#if !defined(PRIdNS)
+#define PRIdNS "ld"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "lu"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "lx"
+#endif
+#else // defined(ARCH_CPU_64_BITS)
+#if !defined(PRIdNS)
+#define PRIdNS "d"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "u"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "x"
+#endif
+#endif
+#endif // defined(OS_MACOSX)
+
+#endif // BASE_FORMAT_MACROS_H_
diff --git a/security/sandbox/chromium/base/guid.h b/security/sandbox/chromium/base/guid.h
new file mode 100644
index 0000000000..f50d3335aa
--- /dev/null
+++ b/security/sandbox/chromium/base/guid.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_GUID_H_
+#define BASE_GUID_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Generate a 128-bit random GUID in the form of version 4 as described in
+// RFC 4122, section 4.4.
+// The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
+// where y is one of [8, 9, A, B].
+// The hexadecimal values "a" through "f" are output as lower case characters.
+//
+// A cryptographically secure random source will be used, but consider using
+// UnguessableToken for greater type-safety if GUID format is unnecessary.
+BASE_EXPORT std::string GenerateGUID();
+
+// Returns true if the input string conforms to the version 4 GUID format.
+// Note that this does NOT check if the hexadecimal values "a" through "f"
+// are in lower case characters, as the Version 4 RFC says on input they're
+// case insensitive. (Use IsValidGUIDOutputString for checking if the
+// given string is valid output string)
+BASE_EXPORT bool IsValidGUID(base::StringPiece guid);
+BASE_EXPORT bool IsValidGUID(base::StringPiece16 guid);
+
+// Returns true if the input string is valid version 4 GUID output string.
+// This also checks if the hexadecimal values "a" through "f" are in lower
+// case characters.
+BASE_EXPORT bool IsValidGUIDOutputString(base::StringPiece guid);
+
+// For unit testing purposes only. Do not use outside of tests.
+BASE_EXPORT std::string RandomDataToGUIDString(const uint64_t bytes[2]);
+
+} // namespace base
+
+#endif // BASE_GUID_H_
diff --git a/security/sandbox/chromium/base/hash/hash.cc b/security/sandbox/chromium/base/hash/hash.cc
new file mode 100644
index 0000000000..c96f8bc843
--- /dev/null
+++ b/security/sandbox/chromium/base/hash/hash.cc
@@ -0,0 +1,167 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/hash/hash.h"
+
+#include "base/rand_util.h"
+#include "base/third_party/cityhash/city.h"
+#include "build/build_config.h"
+
+// Definition in base/third_party/superfasthash/superfasthash.c. (Third-party
+// code did not come with its own header file, so declaring the function here.)
+// Note: This algorithm is also in Blink under Source/wtf/StringHasher.h.
+extern "C" uint32_t SuperFastHash(const char* data, int len);
+
+namespace base {
+
+namespace {
+
+size_t FastHashImpl(base::span<const uint8_t> data) {
+ // We use the updated CityHash within our namespace (not the deprecated
+ // version from third_party/smhasher).
+#if defined(ARCH_CPU_64_BITS)
+ return base::internal::cityhash_v111::CityHash64(
+ reinterpret_cast<const char*>(data.data()), data.size());
+#else
+ return base::internal::cityhash_v111::CityHash32(
+ reinterpret_cast<const char*>(data.data()), data.size());
+#endif
+}
+
+// Implement hashing for pairs of at-most 32 bit integer values.
+// When size_t is 32 bits, we turn the 64-bit hash code into 32 bits by using
+// multiply-add hashing. This algorithm, as described in
+// Theorem 4.3.3 of the thesis "Über die Komplexität der Multiplikation in
+// eingeschränkten Branchingprogrammmodellen" by Woelfel, is:
+//
+// h32(x32, y32) = (h64(x32, y32) * rand_odd64 + rand16 * 2^16) % 2^64 / 2^32
+//
+// Contact danakj@chromium.org for any questions.
+size_t HashInts32Impl(uint32_t value1, uint32_t value2) {
+ uint64_t value1_64 = value1;
+ uint64_t hash64 = (value1_64 << 32) | value2;
+
+ if (sizeof(size_t) >= sizeof(uint64_t))
+ return static_cast<size_t>(hash64);
+
+ uint64_t odd_random = 481046412LL << 32 | 1025306955LL;
+ uint32_t shift_random = 10121U << 16;
+
+ hash64 = hash64 * odd_random + shift_random;
+ size_t high_bits =
+ static_cast<size_t>(hash64 >> (8 * (sizeof(uint64_t) - sizeof(size_t))));
+ return high_bits;
+}
+
+// Implement hashing for pairs of up-to 64-bit integer values.
+// We use the compound integer hash method to produce a 64-bit hash code, by
+// breaking the two 64-bit inputs into 4 32-bit values:
+// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
+// Then we reduce our result to 32 bits if required, similar to above.
+size_t HashInts64Impl(uint64_t value1, uint64_t value2) {
+ uint32_t short_random1 = 842304669U;
+ uint32_t short_random2 = 619063811U;
+ uint32_t short_random3 = 937041849U;
+ uint32_t short_random4 = 3309708029U;
+
+ uint32_t value1a = static_cast<uint32_t>(value1 & 0xffffffff);
+ uint32_t value1b = static_cast<uint32_t>((value1 >> 32) & 0xffffffff);
+ uint32_t value2a = static_cast<uint32_t>(value2 & 0xffffffff);
+ uint32_t value2b = static_cast<uint32_t>((value2 >> 32) & 0xffffffff);
+
+ uint64_t product1 = static_cast<uint64_t>(value1a) * short_random1;
+ uint64_t product2 = static_cast<uint64_t>(value1b) * short_random2;
+ uint64_t product3 = static_cast<uint64_t>(value2a) * short_random3;
+ uint64_t product4 = static_cast<uint64_t>(value2b) * short_random4;
+
+ uint64_t hash64 = product1 + product2 + product3 + product4;
+
+ if (sizeof(size_t) >= sizeof(uint64_t))
+ return static_cast<size_t>(hash64);
+
+ uint64_t odd_random = 1578233944LL << 32 | 194370989LL;
+ uint32_t shift_random = 20591U << 16;
+
+ hash64 = hash64 * odd_random + shift_random;
+ size_t high_bits =
+ static_cast<size_t>(hash64 >> (8 * (sizeof(uint64_t) - sizeof(size_t))));
+ return high_bits;
+}
+
+// The random seed is used to perturb the output of base::FastHash() and
+// base::HashInts() so that it is only deterministic within the lifetime of a
+// process. This prevents inadvertent dependencies on the underlying
+// implementation, e.g. anything that persists the hash value and expects it to
+// be unchanging will break.
+//
+// Note: this is the same trick absl uses to generate a random seed. This is
+// more robust than using base::RandBytes(), which can fail inside a sandboxed
+// environment. Note that without ASLR, the seed won't be quite as random...
+#if DCHECK_IS_ON()
+constexpr const void* kSeed = &kSeed;
+#endif
+
+template <typename T>
+T Scramble(T input) {
+#if DCHECK_IS_ON()
+ return HashInts64Impl(input, reinterpret_cast<uintptr_t>(kSeed));
+#else
+ return input;
+#endif
+}
+
+} // namespace
+
+size_t FastHash(base::span<const uint8_t> data) {
+ return Scramble(FastHashImpl(data));
+}
+
+uint32_t Hash(const void* data, size_t length) {
+ // Currently our in-memory hash is the same as the persistent hash. The
+ // split between in-memory and persistent hash functions is maintained to
+ // allow the in-memory hash function to be updated in the future.
+ return PersistentHash(data, length);
+}
+
+uint32_t Hash(const std::string& str) {
+ return PersistentHash(as_bytes(make_span(str)));
+}
+
+uint32_t Hash(const string16& str) {
+ return PersistentHash(as_bytes(make_span(str)));
+}
+
+uint32_t PersistentHash(span<const uint8_t> data) {
+ // This hash function must not change, since it is designed to be persistable
+ // to disk.
+ if (data.size() > static_cast<size_t>(std::numeric_limits<int>::max())) {
+ NOTREACHED();
+ return 0;
+ }
+ return ::SuperFastHash(reinterpret_cast<const char*>(data.data()),
+ static_cast<int>(data.size()));
+}
+
+uint32_t PersistentHash(const void* data, size_t length) {
+ return PersistentHash(make_span(static_cast<const uint8_t*>(data), length));
+}
+
+uint32_t PersistentHash(const std::string& str) {
+ return PersistentHash(str.data(), str.size());
+}
+
+size_t HashInts32(uint32_t value1, uint32_t value2) {
+ return Scramble(HashInts32Impl(value1, value2));
+}
+
+// Implement hashing for pairs of up-to 64-bit integer values.
+// We use the compound integer hash method to produce a 64-bit hash code, by
+// breaking the two 64-bit inputs into 4 32-bit values:
+// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
+// Then we reduce our result to 32 bits if required, similar to above.
+size_t HashInts64(uint64_t value1, uint64_t value2) {
+ return Scramble(HashInts64Impl(value1, value2));
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/hash/hash.h b/security/sandbox/chromium/base/hash/hash.h
new file mode 100644
index 0000000000..f55ae45b38
--- /dev/null
+++ b/security/sandbox/chromium/base/hash/hash.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_HASH_HASH_H_
+#define BASE_HASH_HASH_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+#include <string>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/containers/span.h"
+#include "base/logging.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// WARNING: These hash functions should not be used for any cryptographic
+// purpose.
+
+// Deprecated: Computes a hash of a memory buffer, use FastHash() instead.
+// If you need to persist a change on disk or between computers, use
+// PersistentHash().
+// TODO(https://crbug.com/1025358): Migrate client code to new hash function.
+BASE_EXPORT uint32_t Hash(const void* data, size_t length);
+BASE_EXPORT uint32_t Hash(const std::string& str);
+BASE_EXPORT uint32_t Hash(const string16& str);
+
+// Really *fast* and high quality hash.
+// Recommended hash function for general use, we pick the best performant
+// hash for each build target.
+// It is prone to be updated whenever a newer/faster hash function is
+// publicly available.
+// May change without warning; do not expect stability of outputs.
+BASE_EXPORT size_t FastHash(base::span<const uint8_t> data);
+inline size_t FastHash(StringPiece str) {
+ return FastHash(as_bytes(make_span(str)));
+}
+
+// Computes a hash of a memory buffer. This hash function must not change so
+// that code can use the hashed values for persistent storage purposes or
+// sending across the network. If a new persistent hash function is desired, a
+// new version will have to be added in addition.
+//
+// WARNING: This hash function should not be used for any cryptographic purpose.
+BASE_EXPORT uint32_t PersistentHash(base::span<const uint8_t> data);
+BASE_EXPORT uint32_t PersistentHash(const void* data, size_t length);
+BASE_EXPORT uint32_t PersistentHash(const std::string& str);
+
+// Hash pairs of 32-bit or 64-bit numbers.
+BASE_EXPORT size_t HashInts32(uint32_t value1, uint32_t value2);
+BASE_EXPORT size_t HashInts64(uint64_t value1, uint64_t value2);
+
+template <typename T1, typename T2>
+inline size_t HashInts(T1 value1, T2 value2) {
+ // This condition is expected to be compile-time evaluated and optimised away
+ // in release builds.
+ if (sizeof(T1) > sizeof(uint32_t) || (sizeof(T2) > sizeof(uint32_t)))
+ return HashInts64(value1, value2);
+
+ return HashInts32(static_cast<uint32_t>(value1),
+ static_cast<uint32_t>(value2));
+}
+
+// A templated hasher for pairs of integer types. Example:
+//
+// using MyPair = std::pair<int32_t, int32_t>;
+// std::unordered_set<MyPair, base::IntPairHash<MyPair>> set;
+template <typename T>
+struct IntPairHash;
+
+template <typename Type1, typename Type2>
+struct IntPairHash<std::pair<Type1, Type2>> {
+ size_t operator()(std::pair<Type1, Type2> value) const {
+ return HashInts(value.first, value.second);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_HASH_HASH_H_
diff --git a/security/sandbox/chromium/base/immediate_crash.h b/security/sandbox/chromium/base/immediate_crash.h
new file mode 100644
index 0000000000..733110a7ac
--- /dev/null
+++ b/security/sandbox/chromium/base/immediate_crash.h
@@ -0,0 +1,168 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_IMMEDIATE_CRASH_H_
+#define BASE_IMMEDIATE_CRASH_H_
+
+#include "build/build_config.h"
+
+// Crashes in the fastest possible way with no attempt at logging.
+// There are several constraints; see http://crbug.com/664209 for more context.
+//
+// - TRAP_SEQUENCE_() must be fatal. It should not be possible to ignore the
+// resulting exception or simply hit 'continue' to skip over it in a debugger.
+// - Different instances of TRAP_SEQUENCE_() must not be folded together, to
+// ensure crash reports are debuggable. Unlike __builtin_trap(), asm volatile
+// blocks will not be folded together.
+// Note: TRAP_SEQUENCE_() previously required an instruction with a unique
+// nonce since unlike clang, GCC folds together identical asm volatile
+// blocks.
+// - TRAP_SEQUENCE_() must produce a signal that is distinct from an invalid
+// memory access.
+// - TRAP_SEQUENCE_() must be treated as a set of noreturn instructions.
+// __builtin_unreachable() is used to provide that hint here. clang also uses
+// this as a heuristic to pack the instructions in the function epilogue to
+// improve code density.
+//
+// Additional properties that are nice to have:
+// - TRAP_SEQUENCE_() should be as compact as possible.
+// - The first instruction of TRAP_SEQUENCE_() should not change, to avoid
+// shifting crash reporting clusters. As a consequence of this, explicit
+// assembly is preferred over intrinsics.
+// Note: this last bullet point may no longer be true, and may be removed in
+// the future.
+
+// Note: TRAP_SEQUENCE is currently split into two macro helpers due to the fact
+// that clang emits an actual instruction for __builtin_unreachable() on certain
+// platforms (see https://crbug.com/958675). In addition, the int3/bkpt/brk will
+// be removed in followups, so splitting it up like this now makes it easy to
+// land the followups.
+
+#if defined(COMPILER_GCC)
+
+#if defined(OS_NACL)
+
+// Crash report accuracy is not guaranteed on NaCl.
+#define TRAP_SEQUENCE1_() __builtin_trap()
+#define TRAP_SEQUENCE2_() asm volatile("")
+
+#elif defined(ARCH_CPU_X86_FAMILY)
+
+// TODO(https://crbug.com/958675): In theory, it should be possible to use just
+// int3. However, there are a number of crashes with SIGILL as the exception
+// code, so it seems likely that there's a signal handler that allows execution
+// to continue after SIGTRAP.
+#define TRAP_SEQUENCE1_() asm volatile("int3")
+
+#if defined(OS_MACOSX)
+// Intentionally empty: __builtin_unreachable() is always part of the sequence
+// (see IMMEDIATE_CRASH below) and already emits a ud2 on Mac.
+#define TRAP_SEQUENCE2_() asm volatile("")
+#else
+#define TRAP_SEQUENCE2_() asm volatile("ud2")
+#endif // defined(OS_MACOSX)
+
+#elif defined(ARCH_CPU_ARMEL)
+
+// bkpt will generate a SIGBUS when running on armv7 and a SIGTRAP when running
+// as a 32 bit userspace app on arm64. There doesn't seem to be any way to
+// cause a SIGTRAP from userspace without using a syscall (which would be a
+// problem for sandboxing).
+// TODO(https://crbug.com/958675): Remove bkpt from this sequence.
+#define TRAP_SEQUENCE1_() asm volatile("bkpt #0")
+#define TRAP_SEQUENCE2_() asm volatile("udf #0")
+
+#elif defined(ARCH_CPU_ARM64)
+
+// This will always generate a SIGTRAP on arm64.
+// TODO(https://crbug.com/958675): Remove brk from this sequence.
+#define TRAP_SEQUENCE1_() asm volatile("brk #0")
+#define TRAP_SEQUENCE2_() asm volatile("hlt #0")
+
+#else
+
+// Crash report accuracy will not be guaranteed on other architectures, but at
+// least this will crash as expected.
+#define TRAP_SEQUENCE1_() __builtin_trap()
+#define TRAP_SEQUENCE2_() asm volatile("")
+
+#endif // ARCH_CPU_*
+
+#elif defined(COMPILER_MSVC)
+
+#if !defined(__clang__)
+
+// MSVC x64 doesn't support inline asm, so use the MSVC intrinsic.
+#define TRAP_SEQUENCE1_() __debugbreak()
+#define TRAP_SEQUENCE2_()
+
+#elif defined(ARCH_CPU_ARM64)
+
+// Windows ARM64 uses "BRK #F000" as its breakpoint instruction, and
+// __debugbreak() generates that in both VC++ and clang.
+#define TRAP_SEQUENCE1_() __debugbreak()
+// Intentionally empty: __builtin_unreachable() is always part of the sequence
+// (see IMMEDIATE_CRASH below) and already emits a ud2 on Win64,
+// https://crbug.com/958373
+#define TRAP_SEQUENCE2_() __asm volatile("")
+
+#else
+
+#define TRAP_SEQUENCE1_() asm volatile("int3")
+#define TRAP_SEQUENCE2_() asm volatile("ud2")
+
+#endif // __clang__
+
+#else
+
+#error No supported trap sequence!
+
+#endif // COMPILER_GCC
+
+#define TRAP_SEQUENCE_() \
+ do { \
+ TRAP_SEQUENCE1_(); \
+ TRAP_SEQUENCE2_(); \
+ } while (false)
+
+// CHECK() and the trap sequence can be invoked from a constexpr function.
+// This could make compilation fail on GCC, as it forbids directly using inline
+// asm inside a constexpr function. However, it allows calling a lambda
+// expression including the same asm.
+// The side effect is that the top of the stacktrace will not point to the
+// calling function, but to this anonymous lambda. This is still useful as the
+// full name of the lambda will typically include the name of the function that
+// calls CHECK() and the debugger will still break at the right line of code.
+#if !defined(COMPILER_GCC)
+
+#define WRAPPED_TRAP_SEQUENCE_() TRAP_SEQUENCE_()
+
+#else
+
+#define WRAPPED_TRAP_SEQUENCE_() \
+ do { \
+ [] { TRAP_SEQUENCE_(); }(); \
+ } while (false)
+
+#endif // !defined(COMPILER_GCC)
+
+#if defined(__clang__) || defined(COMPILER_GCC)
+
+// __builtin_unreachable() hints to the compiler that this is noreturn and can
+// be packed in the function epilogue.
+#define IMMEDIATE_CRASH() \
+ ({ \
+ WRAPPED_TRAP_SEQUENCE_(); \
+ __builtin_unreachable(); \
+ })
+
+#else
+
+// This is supporting non-chromium user of logging.h to build with MSVC, like
+// pdfium. On MSVC there is no __builtin_unreachable().
+#define IMMEDIATE_CRASH() WRAPPED_TRAP_SEQUENCE_()
+
+#endif // defined(__clang__) || defined(COMPILER_GCC)
+
+#endif // BASE_IMMEDIATE_CRASH_H_
diff --git a/security/sandbox/chromium/base/lazy_instance.h b/security/sandbox/chromium/base/lazy_instance.h
new file mode 100644
index 0000000000..4449373ead
--- /dev/null
+++ b/security/sandbox/chromium/base/lazy_instance.h
@@ -0,0 +1,210 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! DEPRECATED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// Please don't introduce new instances of LazyInstance<T>. Use a function-local
+// static of type base::NoDestructor<T> instead:
+//
+// Factory& Factory::GetInstance() {
+// static base::NoDestructor<Factory> instance;
+// return *instance;
+// }
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+//
+// The LazyInstance<Type, Traits> class manages a single instance of Type,
+// which will be lazily created on the first time it's accessed. This class is
+// useful for places you would normally use a function-level static, but you
+// need to have guaranteed thread-safety. The Type constructor will only ever
+// be called once, even if two threads are racing to create the object. Get()
+// and Pointer() will always return the same, completely initialized instance.
+// When the instance is constructed it is registered with AtExitManager. The
+// destructor will be called on program exit.
+//
+// LazyInstance is completely thread safe, assuming that you create it safely.
+// The class was designed to be POD initialized, so it shouldn't require a
+// static constructor. It really only makes sense to declare a LazyInstance as
+// a global variable using the LAZY_INSTANCE_INITIALIZER initializer.
+//
+// LazyInstance is similar to Singleton, except it does not have the singleton
+// property. You can have multiple LazyInstance's of the same type, and each
+// will manage a unique instance. It also preallocates the space for Type, so
+// as to avoid allocating the Type instance on the heap. This may help with the
+// performance of creating the instance, and reducing heap fragmentation. This
+// requires that Type be a complete type so we can determine the size.
+//
+// Example usage:
+// static LazyInstance<MyClass>::Leaky inst = LAZY_INSTANCE_INITIALIZER;
+// void SomeMethod() {
+// inst.Get().SomeMethod(); // MyClass::SomeMethod()
+//
+// MyClass* ptr = inst.Pointer();
+// ptr->DoDoDo(); // MyClass::DoDoDo
+// }
+
+#ifndef BASE_LAZY_INSTANCE_H_
+#define BASE_LAZY_INSTANCE_H_
+
+#include <new> // For placement new.
+
+#include "base/atomicops.h"
+#include "base/debug/leak_annotations.h"
+#include "base/lazy_instance_helpers.h"
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+
+// LazyInstance uses its own struct initializer-list style static
+// initialization, which does not require a constructor.
+#define LAZY_INSTANCE_INITIALIZER {}
+
+namespace base {
+
+template <typename Type>
+struct LazyInstanceTraitsBase {
+ static Type* New(void* instance) {
+ DCHECK_EQ(reinterpret_cast<uintptr_t>(instance) & (alignof(Type) - 1), 0u);
+ // Use placement new to initialize our instance in our preallocated space.
+ // The parenthesis is very important here to force POD type initialization.
+ return new (instance) Type();
+ }
+
+ static void CallDestructor(Type* instance) {
+ // Explicitly call the destructor.
+ instance->~Type();
+ }
+};
+
+// We pull out some of the functionality into non-templated functions, so we
+// can implement the more complicated pieces out of line in the .cc file.
+namespace internal {
+
+// This traits class causes destruction of the contained Type at process exit via
+// AtExitManager. This is probably generally not what you want. Instead, prefer
+// Leaky below.
+template <typename Type>
+struct DestructorAtExitLazyInstanceTraits {
+ static const bool kRegisterOnExit = true;
+#if DCHECK_IS_ON()
+ static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
+
+ static Type* New(void* instance) {
+ return LazyInstanceTraitsBase<Type>::New(instance);
+ }
+
+ static void Delete(Type* instance) {
+ LazyInstanceTraitsBase<Type>::CallDestructor(instance);
+ }
+};
+
+// Use LazyInstance<T>::Leaky for a less-verbose call-site typedef; e.g.:
+// base::LazyInstance<T>::Leaky my_leaky_lazy_instance;
+// instead of:
+// base::LazyInstance<T, base::internal::LeakyLazyInstanceTraits<T> >
+// my_leaky_lazy_instance;
+// (especially when T is MyLongTypeNameImplClientHolderFactory).
+// Only use this internal::-qualified verbose form to extend this traits class
+// (depending on its implementation details).
+template <typename Type>
+struct LeakyLazyInstanceTraits {
+ static const bool kRegisterOnExit = false;
+#if DCHECK_IS_ON()
+ static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+
+ static Type* New(void* instance) {
+ ANNOTATE_SCOPED_MEMORY_LEAK;
+ return LazyInstanceTraitsBase<Type>::New(instance);
+ }
+ static void Delete(Type* instance) {
+ }
+};
+
+template <typename Type>
+struct ErrorMustSelectLazyOrDestructorAtExitForLazyInstance {};
+
+} // namespace internal
+
+template <
+ typename Type,
+ typename Traits =
+ internal::ErrorMustSelectLazyOrDestructorAtExitForLazyInstance<Type>>
+class LazyInstance {
+ public:
+ // Do not define a destructor, as doing so makes LazyInstance a
+ // non-POD-struct. We don't want that because then a static initializer will
+ // be created to register the (empty) destructor with atexit() under MSVC, for
+ // example. We handle destruction of the contained Type class explicitly via
+ // the OnExit member function, where needed.
+ // ~LazyInstance() {}
+
+ // Convenience typedef to avoid having to repeat Type for leaky lazy
+ // instances.
+ typedef LazyInstance<Type, internal::LeakyLazyInstanceTraits<Type>> Leaky;
+ typedef LazyInstance<Type, internal::DestructorAtExitLazyInstanceTraits<Type>>
+ DestructorAtExit;
+
+ Type& Get() {
+ return *Pointer();
+ }
+
+ Type* Pointer() {
+#if DCHECK_IS_ON()
+ if (!Traits::kAllowedToAccessOnNonjoinableThread)
+ ThreadRestrictions::AssertSingletonAllowed();
+#endif
+
+ return subtle::GetOrCreateLazyPointer(
+ &private_instance_, &Traits::New, private_buf_,
+ Traits::kRegisterOnExit ? OnExit : nullptr, this);
+ }
+
+ // Returns true if the lazy instance has been created. Unlike Get() and
+ // Pointer(), calling IsCreated() will not instantiate the object of Type.
+ bool IsCreated() {
+ // Return true (i.e. "created") if |private_instance_| is either being
+ // created right now (i.e. |private_instance_| has value of
+ // internal::kLazyInstanceStateCreating) or was already created (i.e.
+ // |private_instance_| has any other non-zero value).
+ return 0 != subtle::NoBarrier_Load(&private_instance_);
+ }
+
+ // MSVC gives a warning that the alignment expands the size of the
+ // LazyInstance struct to make the size a multiple of the alignment. This
+ // is expected in this case.
+#if defined(OS_WIN)
+#pragma warning(push)
+#pragma warning(disable: 4324)
+#endif
+
+ // Effectively private: member data is only public to allow the linker to
+ // statically initialize it and to maintain a POD class. DO NOT USE FROM
+ // OUTSIDE THIS CLASS.
+ subtle::AtomicWord private_instance_;
+
+ // Preallocated space for the Type instance.
+ alignas(Type) char private_buf_[sizeof(Type)];
+
+#if defined(OS_WIN)
+#pragma warning(pop)
+#endif
+
+ private:
+ Type* instance() {
+ return reinterpret_cast<Type*>(subtle::NoBarrier_Load(&private_instance_));
+ }
+
+ // Adapter function for use with AtExit. This should be called single
+ // threaded, so don't synchronize across threads.
+ // Calling OnExit while the instance is in use by other threads is a mistake.
+ static void OnExit(void* lazy_instance) {
+ LazyInstance<Type, Traits>* me =
+ reinterpret_cast<LazyInstance<Type, Traits>*>(lazy_instance);
+ Traits::Delete(me->instance());
+ subtle::NoBarrier_Store(&me->private_instance_, 0);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_LAZY_INSTANCE_H_
diff --git a/security/sandbox/chromium/base/lazy_instance_helpers.cc b/security/sandbox/chromium/base/lazy_instance_helpers.cc
new file mode 100644
index 0000000000..7b9e0de7c6
--- /dev/null
+++ b/security/sandbox/chromium/base/lazy_instance_helpers.cc
@@ -0,0 +1,64 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/lazy_instance_helpers.h"
+
+#include "base/at_exit.h"
+#include "base/atomicops.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+namespace internal {
+
+bool NeedsLazyInstance(subtle::AtomicWord* state) {
+ // Try to create the instance, if we're the first, will go from 0 to
+ // kLazyInstanceStateCreating, otherwise we've already been beaten here.
+ // The memory access has no memory ordering as state 0 and
+ // kLazyInstanceStateCreating have no associated data (memory barriers are
+ // all about ordering of memory accesses to *associated* data).
+ if (subtle::NoBarrier_CompareAndSwap(state, 0, kLazyInstanceStateCreating) ==
+ 0) {
+ // Caller must create instance
+ return true;
+ }
+
+ // It's either in the process of being created, or already created. Spin.
+ // The load has acquire memory ordering as a thread which sees
+ // state_ == STATE_CREATED needs to acquire visibility over
+ // the associated data (buf_). Pairing Release_Store is in
+ // CompleteLazyInstance().
+ if (subtle::Acquire_Load(state) == kLazyInstanceStateCreating) {
+ const base::TimeTicks start = base::TimeTicks::Now();
+ do {
+ const base::TimeDelta elapsed = base::TimeTicks::Now() - start;
+ // Spin with YieldCurrentThread for at most one ms - this ensures maximum
+ // responsiveness. After that spin with Sleep(1ms) so that we don't burn
+ // excessive CPU time - this also avoids infinite loops due to priority
+ // inversions (https://crbug.com/797129).
+ if (elapsed < TimeDelta::FromMilliseconds(1))
+ PlatformThread::YieldCurrentThread();
+ else
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+ } while (subtle::Acquire_Load(state) == kLazyInstanceStateCreating);
+ }
+ // Someone else created the instance.
+ return false;
+}
+
+void CompleteLazyInstance(subtle::AtomicWord* state,
+ subtle::AtomicWord new_instance,
+ void (*destructor)(void*),
+ void* destructor_arg) {
+ // Instance is created, go from CREATING to CREATED (or reset it if
+ // |new_instance| is null). Releases visibility over |private_buf_| to
+ // readers. Pairing Acquire_Load is in NeedsLazyInstance().
+ subtle::Release_Store(state, new_instance);
+
+ // Make sure that the lazily instantiated object will get destroyed at exit.
+ if (new_instance && destructor)
+ AtExitManager::RegisterCallback(destructor, destructor_arg);
+}
+
+} // namespace internal
+} // namespace base
diff --git a/security/sandbox/chromium/base/lazy_instance_helpers.h b/security/sandbox/chromium/base/lazy_instance_helpers.h
new file mode 100644
index 0000000000..5a43d8b1f2
--- /dev/null
+++ b/security/sandbox/chromium/base/lazy_instance_helpers.h
@@ -0,0 +1,101 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LAZY_INSTANCE_INTERNAL_H_
+#define BASE_LAZY_INSTANCE_INTERNAL_H_
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/logging.h"
+
+// Helper methods used by LazyInstance and a few other base APIs for thread-safe
+// lazy construction.
+
+namespace base {
+namespace internal {
+
+// Our AtomicWord doubles as a spinlock, where a value of
+// kLazyInstanceStateCreating means the spinlock is being held for creation.
+constexpr subtle::AtomicWord kLazyInstanceStateCreating = 1;
+
+// Helper for GetOrCreateLazyPointer(). Checks if instance needs to be created.
+// If so, returns true; otherwise, if another thread has beaten us, waits for
+// the instance to be created and returns false.
+BASE_EXPORT bool NeedsLazyInstance(subtle::AtomicWord* state);
+
+// Helper for GetOrCreateLazyPointer(). After creating an instance, this is
+// called to register the dtor to be called at program exit and to update the
+// atomic state to hold the |new_instance|.
+BASE_EXPORT void CompleteLazyInstance(subtle::AtomicWord* state,
+ subtle::AtomicWord new_instance,
+ void (*destructor)(void*),
+ void* destructor_arg);
+
+} // namespace internal
+
+namespace subtle {
+
+// If |state| is uninitialized (zero), constructs a value using
+// |creator_func(creator_arg)|, stores it into |state| and registers
+// |destructor(destructor_arg)| to be called when the current AtExitManager goes
+// out of scope. Then, returns the value stored in |state|. It is safe to have
+// concurrent calls to this function with the same |state|. |creator_func| may
+// return nullptr if it doesn't want to create an instance anymore (e.g. on
+// shutdown), it is from then on required to return nullptr to all callers (ref.
+// StaticMemorySingletonTraits). In that case, callers need to synchronize
+// before |creator_func| may return a non-null instance again (ref.
+// StaticMemorySingletonTraits::ResurrectForTesting()).
+// Implementation note on |creator_func/creator_arg|. It makes for ugly adapters
+// but it avoids redundant template instantiations (e.g. saves 27KB in
+// chrome.dll) because linker is able to fold these for multiple Types but
+// couldn't with the more advanced CreatorFunc template type which in turn
+// improves code locality (and application startup) -- ref.
+// https://chromium-review.googlesource.com/c/chromium/src/+/530984/5/base/lazy_instance.h#140,
+// worsened by https://chromium-review.googlesource.com/c/chromium/src/+/868013
+// and caught then as https://crbug.com/804034.
+template <typename Type>
+Type* GetOrCreateLazyPointer(subtle::AtomicWord* state,
+ Type* (*creator_func)(void*),
+ void* creator_arg,
+ void (*destructor)(void*),
+ void* destructor_arg) {
+ DCHECK(state);
+ DCHECK(creator_func);
+
+ // If any bit in the created mask is true, the instance has already been
+ // fully constructed.
+ constexpr subtle::AtomicWord kLazyInstanceCreatedMask =
+ ~internal::kLazyInstanceStateCreating;
+
+ // We will hopefully have fast access when the instance is already created.
+ // Since a thread sees |state| == 0 or kLazyInstanceStateCreating at most
+ // once, the load is taken out of NeedsLazyInstance() as a fast-path. The load
+ // has acquire memory ordering as a thread which sees |state| > creating needs
+ // to acquire visibility over the associated data. Pairing Release_Store is in
+ // CompleteLazyInstance().
+ subtle::AtomicWord instance = subtle::Acquire_Load(state);
+ if (!(instance & kLazyInstanceCreatedMask)) {
+ if (internal::NeedsLazyInstance(state)) {
+ // This thread won the race and is now responsible for creating the
+ // instance and storing it back into |state|.
+ instance =
+ reinterpret_cast<subtle::AtomicWord>((*creator_func)(creator_arg));
+ internal::CompleteLazyInstance(state, instance, destructor,
+ destructor_arg);
+ } else {
+ // This thread lost the race but now has visibility over the constructed
+ // instance (NeedsLazyInstance() doesn't return until the constructing
+ // thread releases the instance via CompleteLazyInstance()).
+ instance = subtle::Acquire_Load(state);
+ DCHECK(instance & kLazyInstanceCreatedMask);
+ }
+ }
+ return reinterpret_cast<Type*>(instance);
+}
+
+} // namespace subtle
+
+} // namespace base
+
+#endif // BASE_LAZY_INSTANCE_INTERNAL_H_
diff --git a/security/sandbox/chromium/base/location.cc b/security/sandbox/chromium/base/location.cc
new file mode 100644
index 0000000000..cf189341c7
--- /dev/null
+++ b/security/sandbox/chromium/base/location.cc
@@ -0,0 +1,96 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/location.h"
+
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#endif
+
+#include "base/compiler_specific.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+
+namespace base {
+
+Location::Location() = default;
+Location::Location(const Location& other) = default;
+
+Location::Location(const char* file_name, const void* program_counter)
+ : file_name_(file_name), program_counter_(program_counter) {}
+
+Location::Location(const char* function_name,
+ const char* file_name,
+ int line_number,
+ const void* program_counter)
+ : function_name_(function_name),
+ file_name_(file_name),
+ line_number_(line_number),
+ program_counter_(program_counter) {
+#if !defined(OS_NACL)
+ // The program counter should not be null except in a default constructed
+ // (empty) Location object. This value is used for identity, so if it doesn't
+ // uniquely identify a location, things will break.
+ //
+ // The program counter isn't supported in NaCl so location objects won't work
+ // properly in that context.
+ DCHECK(program_counter);
+#endif
+}
+
+std::string Location::ToString() const {
+ if (has_source_info()) {
+ return std::string(function_name_) + "@" + file_name_ + ":" +
+ NumberToString(line_number_);
+ }
+ return StringPrintf("pc:%p", program_counter_);
+}
+
+#if defined(COMPILER_MSVC)
+#define RETURN_ADDRESS() _ReturnAddress()
+#elif defined(COMPILER_GCC) && !defined(OS_NACL)
+#define RETURN_ADDRESS() \
+ __builtin_extract_return_addr(__builtin_return_address(0))
+#else
+#define RETURN_ADDRESS() nullptr
+#endif
+
+// static
+NOINLINE Location Location::CreateFromHere(const char* file_name) {
+ return Location(file_name, RETURN_ADDRESS());
+}
+
+// static
+NOINLINE Location Location::CreateFromHere(const char* function_name,
+ const char* file_name,
+ int line_number) {
+ return Location(function_name, file_name, line_number, RETURN_ADDRESS());
+}
+
+#if SUPPORTS_LOCATION_BUILTINS && BUILDFLAG(ENABLE_LOCATION_SOURCE)
+// static
+NOINLINE Location Location::Current(const char* function_name,
+ const char* file_name,
+ int line_number) {
+ return Location(function_name, file_name, line_number, RETURN_ADDRESS());
+}
+#elif SUPPORTS_LOCATION_BUILTINS
+// static
+NOINLINE Location Location::Current(const char* file_name) {
+ return Location(file_name, RETURN_ADDRESS());
+}
+#else
+// static
+NOINLINE Location Location::Current() {
+ return Location(nullptr, RETURN_ADDRESS());
+}
+#endif
+
+//------------------------------------------------------------------------------
+NOINLINE const void* GetProgramCounter() {
+ return RETURN_ADDRESS();
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/location.h b/security/sandbox/chromium/base/location.h
new file mode 100644
index 0000000000..bcc3ca0e14
--- /dev/null
+++ b/security/sandbox/chromium/base/location.h
@@ -0,0 +1,142 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LOCATION_H_
+#define BASE_LOCATION_H_
+
+#include <stddef.h>
+
+#include <cassert>
+#include <functional>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/debug/debugging_buildflags.h"
+#include "base/hash/hash.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if defined(__has_builtin)
+// Clang allows detection of these builtins.
+#define SUPPORTS_LOCATION_BUILTINS \
+ (__has_builtin(__builtin_FUNCTION) && __has_builtin(__builtin_FILE) && \
+ __has_builtin(__builtin_LINE))
+#elif defined(COMPILER_GCC) && __GNUC__ >= 7
+// GCC has supported these for a long time, but they point at the function
+// declaration in the case of default arguments, rather than at the call site.
+#define SUPPORTS_LOCATION_BUILTINS 1
+#else
+#define SUPPORTS_LOCATION_BUILTINS 0
+#endif
+
+// Location provides basic info about where an object was constructed, or was
+// significantly brought to life.
+class BASE_EXPORT Location {
+ public:
+ Location();
+ Location(const Location& other);
+
+ // Only initializes the file name and program counter, the source information
+ // will be null for the strings, and -1 for the line number.
+ // TODO(http://crbug.com/760702) remove file name from this constructor.
+ Location(const char* file_name, const void* program_counter);
+
+ // Constructor should be called with a long-lived char*, such as __FILE__.
+ // It assumes the provided value will persist as a global constant, and it
+ // will not make a copy of it.
+ Location(const char* function_name,
+ const char* file_name,
+ int line_number,
+ const void* program_counter);
+
+ // Comparator for hash map insertion. The program counter should uniquely
+ // identify a location.
+ bool operator==(const Location& other) const {
+ return program_counter_ == other.program_counter_;
+ }
+
+ // Returns true if there is source code location info. If this is false,
+ // the Location object only contains a program counter or is
+ // default-initialized (the program counter is also null).
+ bool has_source_info() const { return function_name_ && file_name_; }
+
+ // Will be nullptr for default initialized Location objects and when source
+ // names are disabled.
+ const char* function_name() const { return function_name_; }
+
+ // Will be nullptr for default initialized Location objects and when source
+ // names are disabled.
+ const char* file_name() const { return file_name_; }
+
+ // Will be -1 for default initialized Location objects and when source names
+ // are disabled.
+ int line_number() const { return line_number_; }
+
+ // The address of the code generating this Location object. Should always be
+ // valid except for default initialized Location objects, which will be
+ // nullptr.
+ const void* program_counter() const { return program_counter_; }
+
+ // Converts to the most user-readable form possible. If function and filename
+ // are not available, this will return "pc:<hex address>".
+ std::string ToString() const;
+
+ static Location CreateFromHere(const char* file_name);
+ static Location CreateFromHere(const char* function_name,
+ const char* file_name,
+ int line_number);
+
+#if SUPPORTS_LOCATION_BUILTINS && BUILDFLAG(ENABLE_LOCATION_SOURCE)
+ static Location Current(const char* function_name = __builtin_FUNCTION(),
+ const char* file_name = __builtin_FILE(),
+ int line_number = __builtin_LINE());
+#elif SUPPORTS_LOCATION_BUILTINS
+ static Location Current(const char* file_name = __builtin_FILE());
+#else
+ static Location Current();
+#endif
+
+ private:
+ const char* function_name_ = nullptr;
+ const char* file_name_ = nullptr;
+ int line_number_ = -1;
+ const void* program_counter_ = nullptr;
+};
+
+BASE_EXPORT const void* GetProgramCounter();
+
+// The macros defined here will expand to the current function.
+#if BUILDFLAG(ENABLE_LOCATION_SOURCE)
+
+// Full source information should be included.
+#define FROM_HERE FROM_HERE_WITH_EXPLICIT_FUNCTION(__func__)
+#define FROM_HERE_WITH_EXPLICIT_FUNCTION(function_name) \
+ ::base::Location::CreateFromHere(function_name, __FILE__, __LINE__)
+
+#else
+
+// TODO(http://crbug.com/760702) remove the __FILE__ argument from these calls.
+#define FROM_HERE ::base::Location::CreateFromHere(__FILE__)
+#define FROM_HERE_WITH_EXPLICIT_FUNCTION(function_name) \
+ ::base::Location::CreateFromHere(function_name, __FILE__, -1)
+
+#endif
+
+} // namespace base
+
+namespace std {
+
+// Specialization for using Location in hash tables.
+template <>
+struct hash<::base::Location> {
+ std::size_t operator()(const ::base::Location& loc) const {
+ const void* program_counter = loc.program_counter();
+ return base::FastHash(base::as_bytes(base::make_span(&program_counter, 1)));
+ }
+};
+
+} // namespace std
+
+#endif // BASE_LOCATION_H_
diff --git a/security/sandbox/chromium/base/logging.h b/security/sandbox/chromium/base/logging.h
new file mode 100644
index 0000000000..cb24b94833
--- /dev/null
+++ b/security/sandbox/chromium/base/logging.h
@@ -0,0 +1,1077 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LOGGING_H_
+#define BASE_LOGGING_H_
+
+#include <stddef.h>
+
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <sstream>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
+#include "base/immediate_crash.h"
+#include "base/logging_buildflags.h"
+#include "base/macros.h"
+#include "base/scoped_clear_last_error.h"
+#include "base/strings/string_piece_forward.h"
+#include "base/template_util.h"
+#include "build/build_config.h"
+
+#if defined(OS_CHROMEOS)
+#include <cstdio>
+#endif
+
+//
+// Optional message capabilities
+// -----------------------------
+// Assertion failed messages and fatal errors are displayed in a dialog box
+// before the application exits. However, running this UI creates a message
+// loop, which causes application messages to be processed and potentially
+// dispatched to existing application windows. Since the application is in a
+// bad state when this assertion dialog is displayed, these messages may not
+// get processed and hang the dialog, or the application might go crazy.
+//
+// Therefore, it can be beneficial to display the error dialog in a separate
+// process from the main application. When the logging system needs to display
+// a fatal error dialog box, it will look for a program called
+// "DebugMessage.exe" in the same directory as the application executable. It
+// will run this application with the message as the command line, and will
+// not include the name of the application as is traditional for easier
+// parsing.
+//
+// The code for DebugMessage.exe is only one line. In WinMain, do:
+// MessageBox(NULL, GetCommandLineW(), L"Fatal Error", 0);
+//
+// If DebugMessage.exe is not found, the logging code will use a normal
+// MessageBox, potentially causing the problems discussed above.
+
+// Instructions
+// ------------
+//
+// Make a bunch of macros for logging. The way to log things is to stream
+// things to LOG(<a particular severity level>). E.g.,
+//
+// LOG(INFO) << "Found " << num_cookies << " cookies";
+//
+// You can also do conditional logging:
+//
+// LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// The CHECK(condition) macro is active in both debug and release builds and
+// effectively performs a LOG(FATAL) which terminates the process and
+// generates a crashdump unless a debugger is attached.
+//
+// There are also "debug mode" logging macros like the ones above:
+//
+// DLOG(INFO) << "Found cookies";
+//
+// DLOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// All "debug mode" logging is compiled away to nothing for non-debug mode
+// compiles. LOG_IF and development flags also work well together
+// because the code can be compiled away sometimes.
+//
+// We also have
+//
+// LOG_ASSERT(assertion);
+// DLOG_ASSERT(assertion);
+//
+// which is syntactic sugar for {,D}LOG_IF(FATAL, assert fails) << assertion;
+//
+// There are "verbose level" logging macros. They look like
+//
+// VLOG(1) << "I'm printed when you run the program with --v=1 or more";
+// VLOG(2) << "I'm printed when you run the program with --v=2 or more";
+//
+// These always log at the INFO log level (when they log at all).
+// The verbose logging can also be turned on module-by-module. For instance,
+// --vmodule=profile=2,icon_loader=1,browser_*=3,*/chromeos/*=4 --v=0
+// will cause:
+// a. VLOG(2) and lower messages to be printed from profile.{h,cc}
+// b. VLOG(1) and lower messages to be printed from icon_loader.{h,cc}
+// c. VLOG(3) and lower messages to be printed from files prefixed with
+// "browser"
+// d. VLOG(4) and lower messages to be printed from files under a
+// "chromeos" directory.
+// e. VLOG(0) and lower messages to be printed from elsewhere
+//
+// The wildcarding functionality shown by (c) supports both '*' (match
+// 0 or more characters) and '?' (match any single character)
+// wildcards. Any pattern containing a forward or backward slash will
+// be tested against the whole pathname and not just the module.
+// E.g., "*/foo/bar/*=2" would change the logging level for all code
+// in source files under a "foo/bar" directory.
+//
+// There's also VLOG_IS_ON(n) "verbose level" condition macro. To be used as
+//
+// if (VLOG_IS_ON(2)) {
+// // do some logging preparation and logging
+// // that can't be accomplished with just VLOG(2) << ...;
+// }
+//
+// There is also a VLOG_IF "verbose level" condition macro for sample
+// cases, when some extra computation and preparation for logs is not
+// needed.
+//
+// VLOG_IF(1, (size > 1024))
+// << "I'm printed when size is more than 1024 and when you run the "
+// "program with --v=1 or more";
+//
+// We also override the standard 'assert' to use 'DLOG_ASSERT'.
+//
+// Lastly, there is:
+//
+// PLOG(ERROR) << "Couldn't do foo";
+// DPLOG(ERROR) << "Couldn't do foo";
+// PLOG_IF(ERROR, cond) << "Couldn't do foo";
+// DPLOG_IF(ERROR, cond) << "Couldn't do foo";
+// PCHECK(condition) << "Couldn't do foo";
+// DPCHECK(condition) << "Couldn't do foo";
+//
+// which append the last system error to the message in string form (taken from
+// GetLastError() on Windows and errno on POSIX).
+//
+// The supported severity levels for macros that allow you to specify one
+// are (in increasing order of severity) INFO, WARNING, ERROR, and FATAL.
+//
+// Very important: logging a message at the FATAL severity level causes
+// the program to terminate (after the message is logged).
+//
+// There is the special severity of DFATAL, which logs FATAL in debug mode,
+// ERROR in normal mode.
+//
+// Output is of the format, for example:
+// [3816:3877:0812/234555.406952:VERBOSE1:drm_device_handle.cc(90)] Succeeded
+// authenticating /dev/dri/card0 in 0 ms with 1 attempt(s)
+//
+// The colon separated fields inside the brackets are as follows:
+// 0. An optional Logfile prefix (not included in this example)
+// 1. Process ID
+// 2. Thread ID
+// 3. The date/time of the log message, in MMDD/HHMMSS.Milliseconds format
+// 4. The log level
+// 5. The filename and line number where the log was instantiated
+//
+// Note that the visibility can be changed by setting preferences in
+// SetLogItems()
+
+namespace logging {
+
+// TODO(avi): do we want to do a unification of character types here?
+#if defined(OS_WIN)
+typedef wchar_t PathChar;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+typedef char PathChar;
+#endif
+
+// A bitmask of potential logging destinations.
+using LoggingDestination = uint32_t;
+// Specifies where logs will be written. Multiple destinations can be specified
+// with bitwise OR.
+// Unless destination is LOG_NONE, all logs with severity ERROR and above will
+// be written to stderr in addition to the specified destination.
+enum : uint32_t {
+ LOG_NONE = 0,
+ LOG_TO_FILE = 1 << 0,
+ LOG_TO_SYSTEM_DEBUG_LOG = 1 << 1,
+ LOG_TO_STDERR = 1 << 2,
+
+ LOG_TO_ALL = LOG_TO_FILE | LOG_TO_SYSTEM_DEBUG_LOG | LOG_TO_STDERR,
+
+// On Windows, use a file next to the exe.
+// On POSIX platforms, where it may not even be possible to locate the
+// executable on disk, use stderr.
+// On Fuchsia, use the Fuchsia logging service.
+#if defined(OS_FUCHSIA) || defined(OS_NACL)
+ LOG_DEFAULT = LOG_TO_SYSTEM_DEBUG_LOG,
+#elif defined(OS_WIN)
+ LOG_DEFAULT = LOG_TO_FILE,
+#elif defined(OS_POSIX)
+ LOG_DEFAULT = LOG_TO_SYSTEM_DEBUG_LOG | LOG_TO_STDERR,
+#endif
+};
+
+// Indicates that the log file should be locked when being written to.
+// Unless there is only one single-threaded process that is logging to
+// the log file, the file should be locked during writes to make each
+// log output atomic. Other writers will block.
+//
+// All processes writing to the log file must have their locking set for it to
+// work properly. Defaults to LOCK_LOG_FILE.
+enum LogLockingState { LOCK_LOG_FILE, DONT_LOCK_LOG_FILE };
+
+// On startup, should we delete or append to an existing log file (if any)?
+// Defaults to APPEND_TO_OLD_LOG_FILE.
+enum OldFileDeletionState { DELETE_OLD_LOG_FILE, APPEND_TO_OLD_LOG_FILE };
+
+struct BASE_EXPORT LoggingSettings {
+ // Equivalent to logging destination enum, but allows for multiple
+ // destinations.
+ uint32_t logging_dest = LOG_DEFAULT;
+
+ // The four settings below have an effect only when LOG_TO_FILE is
+ // set in |logging_dest|.
+ const PathChar* log_file_path = nullptr;
+ LogLockingState lock_log = LOCK_LOG_FILE;
+ OldFileDeletionState delete_old = APPEND_TO_OLD_LOG_FILE;
+#if defined(OS_CHROMEOS)
+ // Contains an optional file that logs should be written to. If present,
+ // |log_file_path| will be ignored, and the logging system will take ownership
+ // of the FILE. If there's an error writing to this file, no fallback paths
+ // will be opened.
+ FILE* log_file = nullptr;
+#endif
+};
+
+// Define different names for the BaseInitLoggingImpl() function depending on
+// whether NDEBUG is defined or not so that we'll fail to link if someone tries
+// to compile logging.cc with NDEBUG but includes logging.h without defining it,
+// or vice versa.
+#if defined(NDEBUG)
+#define BaseInitLoggingImpl BaseInitLoggingImpl_built_with_NDEBUG
+#else
+#define BaseInitLoggingImpl BaseInitLoggingImpl_built_without_NDEBUG
+#endif
+
+// Implementation of the InitLogging() method declared below. We use a
+// more-specific name so we can #define it above without affecting other code
+// that has named stuff "InitLogging".
+BASE_EXPORT bool BaseInitLoggingImpl(const LoggingSettings& settings);
+
+// Sets the log file name and other global logging state. Calling this function
+// is recommended, and is normally done at the beginning of application init.
+// If you don't call it, all the flags will be initialized to their default
+// values, and there is a race condition that may leak a critical section
+// object if two threads try to do the first log at the same time.
+// See the definition of the enums above for descriptions and default values.
+//
+// The default log file is initialized to "debug.log" in the application
+// directory. You probably don't want this, especially since the program
+// directory may not be writable on an enduser's system.
+//
+// This function may be called a second time to re-direct logging (e.g after
+// logging in to a user partition), however it should never be called more than
+// twice.
+inline bool InitLogging(const LoggingSettings& settings) {
+ return BaseInitLoggingImpl(settings);
+}
+
+// Sets the log level. Anything at or above this level will be written to the
+// log file/displayed to the user (if applicable). Anything below this level
+// will be silently ignored. The log level defaults to 0 (everything is logged
+// up to level INFO) if this function is not called.
+// Note that log messages for VLOG(x) are logged at level -x, so setting
+// the min log level to negative values enables verbose logging.
+BASE_EXPORT void SetMinLogLevel(int level);
+
+// Gets the current log level.
+BASE_EXPORT int GetMinLogLevel();
+
+// Used by LOG_IS_ON to lazy-evaluate stream arguments.
+BASE_EXPORT bool ShouldCreateLogMessage(int severity);
+
+// Gets the VLOG default verbosity level.
+BASE_EXPORT int GetVlogVerbosity();
+
+// Note that |N| is the size *with* the null terminator.
+BASE_EXPORT int GetVlogLevelHelper(const char* file_start, size_t N);
+
+// Gets the current vlog level for the given file (usually taken from __FILE__).
+template <size_t N>
+int GetVlogLevel(const char (&file)[N]) {
+ return GetVlogLevelHelper(file, N);
+}
+
+// Sets the common items you want to be prepended to each log message.
+// process and thread IDs default to off, the timestamp defaults to on.
+// If this function is not called, logging defaults to writing the timestamp
+// only.
+BASE_EXPORT void SetLogItems(bool enable_process_id, bool enable_thread_id,
+ bool enable_timestamp, bool enable_tickcount);
+
+// Sets an optional prefix to add to each log message. |prefix| is not copied
+// and should be a raw string constant. |prefix| must only contain ASCII letters
+// to avoid confusion with PIDs and timestamps. Pass null to remove the prefix.
+// Logging defaults to no prefix.
+BASE_EXPORT void SetLogPrefix(const char* prefix);
+
+// Sets whether or not you'd like to see fatal debug messages popped up in
+// a dialog box or not.
+// Dialogs are not shown by default.
+BASE_EXPORT void SetShowErrorDialogs(bool enable_dialogs);
+
+// Sets the Log Assert Handler that will be used to notify of check failures.
+// Resets Log Assert Handler on object destruction.
+// The default handler shows a dialog box and then terminates the process;
+// however, clients can use this function to override it with their own
+// handling (e.g. a silent one for unit tests).
+using LogAssertHandlerFunction =
+ base::RepeatingCallback<void(const char* file,
+ int line,
+ const base::StringPiece message,
+ const base::StringPiece stack_trace)>;
+
+class BASE_EXPORT ScopedLogAssertHandler {
+ public:
+ explicit ScopedLogAssertHandler(LogAssertHandlerFunction handler);
+ ~ScopedLogAssertHandler();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScopedLogAssertHandler);
+};
+
+// Sets the Log Message Handler that gets passed every log message before
+// it's sent to other log destinations (if any).
+// Returns true to signal that it handled the message and the message
+// should not be sent to other log destinations.
+typedef bool (*LogMessageHandlerFunction)(int severity,
+ const char* file, int line, size_t message_start, const std::string& str);
+BASE_EXPORT void SetLogMessageHandler(LogMessageHandlerFunction handler);
+BASE_EXPORT LogMessageHandlerFunction GetLogMessageHandler();
+
+// The ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints
+// to Clang which control what code paths are statically analyzed,
+// and is meant to be used in conjunction with assert & assert-like functions.
+// The expression is passed straight through if analysis isn't enabled.
+//
+// ANALYZER_SKIP_THIS_PATH() suppresses static analysis for the current
+// codepath and any other branching codepaths that might follow.
+#if defined(__clang_analyzer__)
+
+inline constexpr bool AnalyzerNoReturn() __attribute__((analyzer_noreturn)) {
+ return false;
+}
+
+inline constexpr bool AnalyzerAssumeTrue(bool arg) {
+ // AnalyzerNoReturn() is invoked and analysis is terminated if |arg| is
+ // false.
+ return arg || AnalyzerNoReturn();
+}
+
+#define ANALYZER_ASSUME_TRUE(arg) logging::AnalyzerAssumeTrue(!!(arg))
+#define ANALYZER_SKIP_THIS_PATH() \
+ static_cast<void>(::logging::AnalyzerNoReturn())
+#define ANALYZER_ALLOW_UNUSED(var) static_cast<void>(var);
+
+#else // !defined(__clang_analyzer__)
+
+#define ANALYZER_ASSUME_TRUE(arg) (arg)
+#define ANALYZER_SKIP_THIS_PATH()
+#define ANALYZER_ALLOW_UNUSED(var) static_cast<void>(var);
+
+#endif // defined(__clang_analyzer__)
+
+typedef int LogSeverity;
+const LogSeverity LOG_VERBOSE = -1; // This is level 1 verbosity
+// Note: the log severities are used to index into the array of names,
+// see log_severity_names.
+const LogSeverity LOG_INFO = 0;
+const LogSeverity LOG_WARNING = 1;
+const LogSeverity LOG_ERROR = 2;
+const LogSeverity LOG_FATAL = 3;
+const LogSeverity LOG_NUM_SEVERITIES = 4;
+
+// LOG_DFATAL is LOG_FATAL in debug mode, ERROR in normal mode
+#if defined(NDEBUG)
+const LogSeverity LOG_DFATAL = LOG_ERROR;
+#else
+const LogSeverity LOG_DFATAL = LOG_FATAL;
+#endif
+
+// A few definitions of macros that don't generate much code. These are used
+// by LOG() and LOG_IF, etc. Since these are used all over our code, it's
+// better to have compact code for these operations.
+#define COMPACT_GOOGLE_LOG_EX_INFO(ClassName, ...) \
+ ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_INFO, ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_WARNING(ClassName, ...) \
+ ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_WARNING, \
+ ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ...) \
+ ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_ERROR, ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_FATAL(ClassName, ...) \
+ ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_FATAL, ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_DFATAL(ClassName, ...) \
+ ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_DFATAL, ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_DCHECK(ClassName, ...) \
+ ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_DCHECK, ##__VA_ARGS__)
+
+#define COMPACT_GOOGLE_LOG_INFO COMPACT_GOOGLE_LOG_EX_INFO(LogMessage)
+#define COMPACT_GOOGLE_LOG_WARNING COMPACT_GOOGLE_LOG_EX_WARNING(LogMessage)
+#define COMPACT_GOOGLE_LOG_ERROR COMPACT_GOOGLE_LOG_EX_ERROR(LogMessage)
+#define COMPACT_GOOGLE_LOG_FATAL COMPACT_GOOGLE_LOG_EX_FATAL(LogMessage)
+#define COMPACT_GOOGLE_LOG_DFATAL COMPACT_GOOGLE_LOG_EX_DFATAL(LogMessage)
+#define COMPACT_GOOGLE_LOG_DCHECK COMPACT_GOOGLE_LOG_EX_DCHECK(LogMessage)
+
+#if defined(OS_WIN)
+// wingdi.h defines ERROR to be 0. When we call LOG(ERROR), it gets
+// substituted with 0, and it expands to COMPACT_GOOGLE_LOG_0. To allow us
+// to keep using this syntax, we define this macro to do the same thing
+// as COMPACT_GOOGLE_LOG_ERROR, and also define ERROR the same way that
+// the Windows SDK does for consistency.
+#define ERROR 0
+#define COMPACT_GOOGLE_LOG_EX_0(ClassName, ...) \
+ COMPACT_GOOGLE_LOG_EX_ERROR(ClassName , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_0 COMPACT_GOOGLE_LOG_ERROR
+// Needed for LOG_IS_ON(ERROR).
+const LogSeverity LOG_0 = LOG_ERROR;
+#endif
+
+// As special cases, we can assume that LOG_IS_ON(FATAL) always holds. Also,
+// LOG_IS_ON(DFATAL) always holds in debug mode. In particular, CHECK()s will
+// always fire if they fail.
+#define LOG_IS_ON(severity) \
+ (::logging::ShouldCreateLogMessage(::logging::LOG_##severity))
+
+// We don't do any caching tricks with VLOG_IS_ON() like the
+// google-glog version since it increases binary size. This means
+// that using the v-logging functions in conjunction with --vmodule
+// may be slow.
+#define VLOG_IS_ON(verboselevel) \
+ ((verboselevel) <= ::logging::GetVlogLevel(__FILE__))
+
+// Helper macro which avoids evaluating the arguments to a stream if
+// the condition doesn't hold. Condition is evaluated once and only once.
+#define LAZY_STREAM(stream, condition) \
+ !(condition) ? (void) 0 : ::logging::LogMessageVoidify() & (stream)
+
+// We use the preprocessor's merging operator, "##", so that, e.g.,
+// LOG(INFO) becomes the token COMPACT_GOOGLE_LOG_INFO. There's some funny
+// subtle difference between ostream member streaming functions (e.g.,
+// ostream::operator<<(int) and ostream non-member streaming functions
+// (e.g., ::operator<<(ostream&, string&): it turns out that it's
+// impossible to stream something like a string directly to an unnamed
+// ostream. We employ a neat hack by calling the stream() member
+// function of LogMessage which seems to avoid the problem.
+#define LOG_STREAM(severity) COMPACT_GOOGLE_LOG_ ## severity.stream()
+
+#define LOG(severity) LAZY_STREAM(LOG_STREAM(severity), LOG_IS_ON(severity))
+#define LOG_IF(severity, condition) \
+ LAZY_STREAM(LOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
+
+// The VLOG macros log with negative verbosities.
+#define VLOG_STREAM(verbose_level) \
+ ::logging::LogMessage(__FILE__, __LINE__, -verbose_level).stream()
+
+#define VLOG(verbose_level) \
+ LAZY_STREAM(VLOG_STREAM(verbose_level), VLOG_IS_ON(verbose_level))
+
+#define VLOG_IF(verbose_level, condition) \
+ LAZY_STREAM(VLOG_STREAM(verbose_level), \
+ VLOG_IS_ON(verbose_level) && (condition))
+
+#if defined (OS_WIN)
+#define VPLOG_STREAM(verbose_level) \
+ ::logging::Win32ErrorLogMessage(__FILE__, __LINE__, -verbose_level, \
+ ::logging::GetLastSystemErrorCode()).stream()
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#define VPLOG_STREAM(verbose_level) \
+ ::logging::ErrnoLogMessage(__FILE__, __LINE__, -verbose_level, \
+ ::logging::GetLastSystemErrorCode()).stream()
+#endif
+
+#define VPLOG(verbose_level) \
+ LAZY_STREAM(VPLOG_STREAM(verbose_level), VLOG_IS_ON(verbose_level))
+
+#define VPLOG_IF(verbose_level, condition) \
+ LAZY_STREAM(VPLOG_STREAM(verbose_level), \
+ VLOG_IS_ON(verbose_level) && (condition))
+
+// Note: VPLOG and VPLOG_IF variants are defined above; add further variants
+
+#define LOG_ASSERT(condition) \
+ LOG_IF(FATAL, !(ANALYZER_ASSUME_TRUE(condition))) \
+ << "Assert failed: " #condition ". "
+
+#if defined(OS_WIN)
+#define PLOG_STREAM(severity) \
+ COMPACT_GOOGLE_LOG_EX_ ## severity(Win32ErrorLogMessage, \
+ ::logging::GetLastSystemErrorCode()).stream()
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#define PLOG_STREAM(severity) \
+ COMPACT_GOOGLE_LOG_EX_ ## severity(ErrnoLogMessage, \
+ ::logging::GetLastSystemErrorCode()).stream()
+#endif
+
+#define PLOG(severity) \
+ LAZY_STREAM(PLOG_STREAM(severity), LOG_IS_ON(severity))
+
+#define PLOG_IF(severity, condition) \
+ LAZY_STREAM(PLOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
+
+BASE_EXPORT extern std::ostream* g_swallow_stream;
+
+// Note that g_swallow_stream is used instead of an arbitrary LOG() stream to
+// avoid the creation of an object with a non-trivial destructor (LogMessage).
+// On MSVC x86 (checked on 2015 Update 3), this causes a few additional
+// pointless instructions to be emitted even at full optimization level, even
+// though the : arm of the ternary operator is clearly never executed. Using a
+// simpler object to be &'d with Voidify() avoids these extra instructions.
+// Using a simpler POD object with a templated operator<< also works to avoid
+// these instructions. However, this causes warnings on statically defined
+// implementations of operator<<(std::ostream, ...) in some .cc files, because
+// they become defined-but-unreferenced functions. A reinterpret_cast of 0 to an
+// ostream* also is not suitable, because some compilers warn of undefined
+// behavior.
+#define EAT_STREAM_PARAMETERS \
+ true ? (void)0 \
+ : ::logging::LogMessageVoidify() & (*::logging::g_swallow_stream)
+
+// Captures the result of a CHECK_EQ (for example) and facilitates testing as a
+// boolean.
+class CheckOpResult {
+ public:
+ // |message| must be non-null if and only if the check failed.
+ constexpr CheckOpResult(std::string* message) : message_(message) {}
+ // Returns true if the check succeeded.
+ constexpr operator bool() const { return !message_; }
+ // Returns the message.
+ std::string* message() { return message_; }
+
+ private:
+ std::string* message_;
+};
+
+// CHECK dies with a fatal error if condition is not true. It is *not*
+// controlled by NDEBUG, so the check will be executed regardless of
+// compilation mode.
+//
+// We make sure CHECK et al. always evaluates their arguments, as
+// doing CHECK(FunctionWithSideEffect()) is a common idiom.
+
+#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
+
+// Make all CHECK functions discard their log strings to reduce code bloat, and
+// improve performance, for official release builds.
+//
+// This is not calling BreakDebugger since this is called frequently, and
+// calling an out-of-line function instead of a noreturn inline macro prevents
+// compiler optimizations.
+#define CHECK(condition) \
+ UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_STREAM_PARAMETERS
+
+// PCHECK includes the system error code, which is useful for determining
+// why the condition failed. In official builds, preserve only the error code
+// message so that it is available in crash reports. The stringified
+// condition and any additional stream parameters are dropped.
+#define PCHECK(condition) \
+ LAZY_STREAM(PLOG_STREAM(FATAL), UNLIKELY(!(condition))); \
+ EAT_STREAM_PARAMETERS
+
+#define CHECK_OP(name, op, val1, val2) CHECK((val1) op (val2))
+
+#else // !(OFFICIAL_BUILD && NDEBUG)
+
+// Do as much work as possible out of line to reduce inline code size.
+#define CHECK(condition) \
+ LAZY_STREAM(::logging::LogMessage(__FILE__, __LINE__, #condition).stream(), \
+ !ANALYZER_ASSUME_TRUE(condition))
+
+#define PCHECK(condition) \
+ LAZY_STREAM(PLOG_STREAM(FATAL), !ANALYZER_ASSUME_TRUE(condition)) \
+ << "Check failed: " #condition ". "
+
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use CHECK_EQ et al below.
+// The 'switch' is used to prevent the 'else' from being ambiguous when the
+// macro is used in an 'if' clause such as:
+// if (a == 1)
+// CHECK_EQ(2, a);
+#define CHECK_OP(name, op, val1, val2) \
+ switch (0) case 0: default: \
+ if (::logging::CheckOpResult true_if_passed = \
+ ::logging::Check##name##Impl((val1), (val2), \
+ #val1 " " #op " " #val2)) \
+ ; \
+ else \
+ ::logging::LogMessage(__FILE__, __LINE__, true_if_passed.message()).stream()
+
+#endif // !(OFFICIAL_BUILD && NDEBUG)
+
+// This formats a value for a failing CHECK_XX statement. Ordinarily,
+// it uses the definition for operator<<, with a few special cases below.
+template <typename T>
+inline typename std::enable_if<
+ base::internal::SupportsOstreamOperator<const T&>::value &&
+ !std::is_function<typename std::remove_pointer<T>::type>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << v;
+}
+
+// Overload for types that have no operator<< but do have .ToString() defined.
+template <typename T>
+inline typename std::enable_if<
+ !base::internal::SupportsOstreamOperator<const T&>::value &&
+ base::internal::SupportsToString<const T&>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << v.ToString();
+}
+
+// Provide an overload for functions and function pointers. Function pointers
+// don't implicitly convert to void* but do implicitly convert to bool, so
+// without this function pointers are always printed as 1 or 0. (MSVC isn't
+// standards-conforming here and converts function pointers to regular
+// pointers, so this is a no-op for MSVC.)
+template <typename T>
+inline typename std::enable_if<
+ std::is_function<typename std::remove_pointer<T>::type>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << reinterpret_cast<const void*>(v);
+}
+
+// We need overloads for enums that don't support operator<<.
+// (i.e. scoped enums where no operator<< overload was declared).
+template <typename T>
+inline typename std::enable_if<
+ !base::internal::SupportsOstreamOperator<const T&>::value &&
+ std::is_enum<T>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << static_cast<typename std::underlying_type<T>::type>(v);
+}
+
+// We need an explicit overload for std::nullptr_t.
+BASE_EXPORT void MakeCheckOpValueString(std::ostream* os, std::nullptr_t p);
+
+// Build the error message string. This is separate from the "Impl"
+// function template because it is not performance critical and so can
+// be out of line, while the "Impl" code should be inline. Caller
+// takes ownership of the returned string.
+template<class t1, class t2>
+std::string* MakeCheckOpString(const t1& v1, const t2& v2, const char* names) {
+ std::ostringstream ss;
+ ss << names << " (";
+ MakeCheckOpValueString(&ss, v1);
+ ss << " vs. ";
+ MakeCheckOpValueString(&ss, v2);
+ ss << ")";
+ std::string* msg = new std::string(ss.str());
+ return msg;
+}
+
+// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated
+// in logging.cc.
+extern template BASE_EXPORT std::string* MakeCheckOpString<int, int>(
+ const int&, const int&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned long, unsigned long>(
+ const unsigned long&, const unsigned long&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned long, unsigned int>(
+ const unsigned long&, const unsigned int&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned int, unsigned long>(
+ const unsigned int&, const unsigned long&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<std::string, std::string>(
+ const std::string&, const std::string&, const char* name);
+
+// Helper functions for CHECK_OP macro.
+// The (int, int) specialization works around the issue that the compiler
+// will not instantiate the template version of the function on values of
+// unnamed enum type - see comment below.
+//
+// The checked condition is wrapped with ANALYZER_ASSUME_TRUE, which under
+// static analysis builds, blocks analysis of the current path if the
+// condition is false.
+#define DEFINE_CHECK_OP_IMPL(name, op) \
+ template <class t1, class t2> \
+ constexpr std::string* Check##name##Impl(const t1& v1, const t2& v2, \
+ const char* names) { \
+ if (ANALYZER_ASSUME_TRUE(v1 op v2)) \
+ return nullptr; \
+ else \
+ return ::logging::MakeCheckOpString(v1, v2, names); \
+ } \
+ constexpr std::string* Check##name##Impl(int v1, int v2, \
+ const char* names) { \
+ if (ANALYZER_ASSUME_TRUE(v1 op v2)) \
+ return nullptr; \
+ else \
+ return ::logging::MakeCheckOpString(v1, v2, names); \
+ }
+DEFINE_CHECK_OP_IMPL(EQ, ==)
+DEFINE_CHECK_OP_IMPL(NE, !=)
+DEFINE_CHECK_OP_IMPL(LE, <=)
+DEFINE_CHECK_OP_IMPL(LT, < )
+DEFINE_CHECK_OP_IMPL(GE, >=)
+DEFINE_CHECK_OP_IMPL(GT, > )
+#undef DEFINE_CHECK_OP_IMPL
+
+#define CHECK_EQ(val1, val2) CHECK_OP(EQ, ==, val1, val2)
+#define CHECK_NE(val1, val2) CHECK_OP(NE, !=, val1, val2)
+#define CHECK_LE(val1, val2) CHECK_OP(LE, <=, val1, val2)
+#define CHECK_LT(val1, val2) CHECK_OP(LT, < , val1, val2)
+#define CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2)
+#define CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2)
+
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+#define DCHECK_IS_ON() false
+#else
+#define DCHECK_IS_ON() true
+#endif
+
+// Definitions for DLOG et al.
+
+#if DCHECK_IS_ON()
+
+#define DLOG_IS_ON(severity) LOG_IS_ON(severity)
+#define DLOG_IF(severity, condition) LOG_IF(severity, condition)
+#define DLOG_ASSERT(condition) LOG_ASSERT(condition)
+#define DPLOG_IF(severity, condition) PLOG_IF(severity, condition)
+#define DVLOG_IF(verboselevel, condition) VLOG_IF(verboselevel, condition)
+#define DVPLOG_IF(verboselevel, condition) VPLOG_IF(verboselevel, condition)
+
+#else // DCHECK_IS_ON()
+
+// If !DCHECK_IS_ON(), we want to avoid emitting any references to |condition|
+// (which may reference a variable defined only if DCHECK_IS_ON()).
+// Contrast this with DCHECK et al., which has different behavior.
+
+#define DLOG_IS_ON(severity) false
+#define DLOG_IF(severity, condition) EAT_STREAM_PARAMETERS
+#define DLOG_ASSERT(condition) EAT_STREAM_PARAMETERS
+#define DPLOG_IF(severity, condition) EAT_STREAM_PARAMETERS
+#define DVLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
+#define DVPLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
+
+#endif // DCHECK_IS_ON()
+
+#define DLOG(severity) \
+ LAZY_STREAM(LOG_STREAM(severity), DLOG_IS_ON(severity))
+
+#define DPLOG(severity) \
+ LAZY_STREAM(PLOG_STREAM(severity), DLOG_IS_ON(severity))
+
+#define DVLOG(verboselevel) DVLOG_IF(verboselevel, true)
+
+#define DVPLOG(verboselevel) DVPLOG_IF(verboselevel, true)
+
+// Definitions for DCHECK et al.
+
+#if DCHECK_IS_ON()
+
+#if defined(DCHECK_IS_CONFIGURABLE)
+BASE_EXPORT extern LogSeverity LOG_DCHECK;
+#else
+const LogSeverity LOG_DCHECK = LOG_FATAL;
+#endif // defined(DCHECK_IS_CONFIGURABLE)
+
+#else // DCHECK_IS_ON()
+
+// There may be users of LOG_DCHECK that are enabled independently
+// of DCHECK_IS_ON(), so default to FATAL logging for those.
+const LogSeverity LOG_DCHECK = LOG_FATAL;
+
+#endif // DCHECK_IS_ON()
+
+// DCHECK et al. make sure to reference |condition| regardless of
+// whether DCHECKs are enabled; this is so that we don't get unused
+// variable warnings if the only use of a variable is in a DCHECK.
+// This behavior is different from DLOG_IF et al.
+//
+// Note that the definition of the DCHECK macros depends on whether or not
+// DCHECK_IS_ON() is true. When DCHECK_IS_ON() is false, the macros use
+// EAT_STREAM_PARAMETERS to avoid expressions that would create temporaries.
+
+#if DCHECK_IS_ON()
+
+#define DCHECK(condition) \
+ LAZY_STREAM(LOG_STREAM(DCHECK), !ANALYZER_ASSUME_TRUE(condition)) \
+ << "Check failed: " #condition ". "
+#define DPCHECK(condition) \
+ LAZY_STREAM(PLOG_STREAM(DCHECK), !ANALYZER_ASSUME_TRUE(condition)) \
+ << "Check failed: " #condition ". "
+
+#else // DCHECK_IS_ON()
+
+#define DCHECK(condition) EAT_STREAM_PARAMETERS << !(condition)
+#define DPCHECK(condition) EAT_STREAM_PARAMETERS << !(condition)
+
+#endif // DCHECK_IS_ON()
+
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use DCHECK_EQ et al below.
+// The 'switch' is used to prevent the 'else' from being ambiguous when the
+// macro is used in an 'if' clause such as:
+// if (a == 1)
+// DCHECK_EQ(2, a);
+#if DCHECK_IS_ON()
+
+#define DCHECK_OP(name, op, val1, val2) \
+ switch (0) case 0: default: \
+ if (::logging::CheckOpResult true_if_passed = \
+ ::logging::Check##name##Impl((val1), (val2), \
+ #val1 " " #op " " #val2)) \
+ ; \
+ else \
+ ::logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK, \
+ true_if_passed.message()).stream()
+
+#else // DCHECK_IS_ON()
+
+// When DCHECKs aren't enabled, DCHECK_OP still needs to reference operator<<
+// overloads for |val1| and |val2| to avoid potential compiler warnings about
+// unused functions. For the same reason, it also compares |val1| and |val2|
+// using |op|.
+//
+// Note that the contract of DCHECK_EQ, etc is that arguments are only evaluated
+// once. Even though |val1| and |val2| appear twice in this version of the macro
+// expansion, this is OK, since the expression is never actually evaluated.
+#define DCHECK_OP(name, op, val1, val2) \
+ EAT_STREAM_PARAMETERS << (::logging::MakeCheckOpValueString( \
+ ::logging::g_swallow_stream, val1), \
+ ::logging::MakeCheckOpValueString( \
+ ::logging::g_swallow_stream, val2), \
+ (val1)op(val2))
+
+#endif // DCHECK_IS_ON()
+
+// Equality/Inequality checks - compare two values, and log a
+// LOG_DCHECK message including the two values when the result is not
+// as expected. The values must have operator<<(ostream, ...)
+// defined.
+//
+// You may append to the error message like so:
+// DCHECK_NE(1, 2) << "The world must be ending!";
+//
+// We are very careful to ensure that each argument is evaluated exactly
+// once, and that anything which is legal to pass as a function argument is
+// legal here. In particular, the arguments may be temporary expressions
+// which will end up being destroyed at the end of the apparent statement,
+// for example:
+// DCHECK_EQ(string("abc")[1], 'b');
+//
+// WARNING: These don't compile correctly if one of the arguments is a pointer
+// and the other is NULL. In new code, prefer nullptr instead. To
+// work around this for C++98, simply static_cast NULL to the type of the
+// desired pointer.
+
+#define DCHECK_EQ(val1, val2) DCHECK_OP(EQ, ==, val1, val2)
+#define DCHECK_NE(val1, val2) DCHECK_OP(NE, !=, val1, val2)
+#define DCHECK_LE(val1, val2) DCHECK_OP(LE, <=, val1, val2)
+#define DCHECK_LT(val1, val2) DCHECK_OP(LT, < , val1, val2)
+#define DCHECK_GE(val1, val2) DCHECK_OP(GE, >=, val1, val2)
+#define DCHECK_GT(val1, val2) DCHECK_OP(GT, > , val1, val2)
+
+#if BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED)
+// Implement logging of NOTREACHED() as a dedicated function to get function
+// call overhead down to a minimum.
+void LogErrorNotReached(const char* file, int line);
+#define NOTREACHED() \
+ true ? ::logging::LogErrorNotReached(__FILE__, __LINE__) \
+ : EAT_STREAM_PARAMETERS
+#else
+#define NOTREACHED() DCHECK(false)
+#endif
+
+// Redefine the standard assert to use our nice log files
+#undef assert
+#define assert(x) DLOG_ASSERT(x)
+
+// This class more or less represents a particular log message. You
+// create an instance of LogMessage and then stream stuff to it.
+// When you finish streaming to it, ~LogMessage is called and the
+// full message gets streamed to the appropriate destination.
+//
+// You shouldn't actually use LogMessage's constructor to log things,
+// though. You should use the LOG() macro (and variants thereof)
+// above.
+class BASE_EXPORT LogMessage {
+ public:
+ // Used for LOG(severity).
+ LogMessage(const char* file, int line, LogSeverity severity);
+
+ // Used for CHECK(). Implied severity = LOG_FATAL.
+ LogMessage(const char* file, int line, const char* condition);
+
+ // Used for CHECK_EQ(), etc. Takes ownership of the given string.
+ // Implied severity = LOG_FATAL.
+ LogMessage(const char* file, int line, std::string* result);
+
+ // Used for DCHECK_EQ(), etc. Takes ownership of the given string.
+ LogMessage(const char* file, int line, LogSeverity severity,
+ std::string* result);
+
+ ~LogMessage();
+
+ std::ostream& stream() { return stream_; }
+
+ LogSeverity severity() { return severity_; }
+ std::string str() { return stream_.str(); }
+
+ private:
+ void Init(const char* file, int line);
+
+ LogSeverity severity_;
+ std::ostringstream stream_;
+ size_t message_start_; // Offset of the start of the message (past prefix
+ // info).
+ // The file and line information passed in to the constructor.
+ const char* file_;
+ const int line_;
+ const char* file_basename_;
+
+ // This is useful since the LogMessage class uses a lot of Win32 calls
+ // that will lose the value of GLE and the code that called the log function
+ // will have lost the thread error value when the log call returns.
+ base::internal::ScopedClearLastError last_error_;
+
+ DISALLOW_COPY_AND_ASSIGN(LogMessage);
+};
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros. This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+class LogMessageVoidify {
+ public:
+ LogMessageVoidify() = default;
+ // This has to be an operator with a precedence lower than << but
+ // higher than ?:
+ void operator&(std::ostream&) { }
+};
+
+#if defined(OS_WIN)
+typedef unsigned long SystemErrorCode;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+typedef int SystemErrorCode;
+#endif
+
+// Alias for ::GetLastError() on Windows and errno on POSIX. Avoids having to
+// pull in windows.h just for GetLastError() and DWORD.
+BASE_EXPORT SystemErrorCode GetLastSystemErrorCode();
+BASE_EXPORT std::string SystemErrorCodeToString(SystemErrorCode error_code);
+
+#if defined(OS_WIN)
+// Appends a formatted system message of the GetLastError() type.
+class BASE_EXPORT Win32ErrorLogMessage {
+ public:
+ Win32ErrorLogMessage(const char* file,
+ int line,
+ LogSeverity severity,
+ SystemErrorCode err);
+
+ // Appends the error message before destructing the encapsulated class.
+ ~Win32ErrorLogMessage();
+
+ std::ostream& stream() { return log_message_.stream(); }
+
+ private:
+ SystemErrorCode err_;
+ LogMessage log_message_;
+
+ DISALLOW_COPY_AND_ASSIGN(Win32ErrorLogMessage);
+};
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+// Appends a formatted system message of the errno type
+class BASE_EXPORT ErrnoLogMessage {
+ public:
+ ErrnoLogMessage(const char* file,
+ int line,
+ LogSeverity severity,
+ SystemErrorCode err);
+
+ // Appends the error message before destructing the encapsulated class.
+ ~ErrnoLogMessage();
+
+ std::ostream& stream() { return log_message_.stream(); }
+
+ private:
+ SystemErrorCode err_;
+ LogMessage log_message_;
+
+ DISALLOW_COPY_AND_ASSIGN(ErrnoLogMessage);
+};
+#endif  // OS_WIN / (OS_POSIX || OS_FUCHSIA)
+
+// Closes the log file explicitly if open.
+// NOTE: Since the log file is opened as necessary by the action of logging
+// statements, there's no guarantee that it will stay closed
+// after this call.
+BASE_EXPORT void CloseLogFile();
+
+#if defined(OS_CHROMEOS)
+// Returns a new file handle that will write to the same destination as the
+// currently open log file. Returns nullptr if logging to a file is disabled,
+// or if opening the file failed. This is intended to be used to initialize
+// logging in child processes that are unable to open files.
+BASE_EXPORT FILE* DuplicateLogFILE();
+#endif
+
+// Async signal safe logging mechanism.
+BASE_EXPORT void RawLog(int level, const char* message);
+
+#define RAW_LOG(level, message) \
+ ::logging::RawLog(::logging::LOG_##level, message)
+
+#define RAW_CHECK(condition) \
+ do { \
+ if (!(condition)) \
+ ::logging::RawLog(::logging::LOG_FATAL, \
+ "Check failed: " #condition "\n"); \
+ } while (0)
+
+#if defined(OS_WIN)
+// Returns true if logging to file is enabled.
+BASE_EXPORT bool IsLoggingToFileEnabled();
+
+// Returns the default log file path.
+BASE_EXPORT std::wstring GetLogFileFullPath();
+#endif
+
+} // namespace logging
+
+// Note that "The behavior of a C++ program is undefined if it adds declarations
+// or definitions to namespace std or to a namespace within namespace std unless
+// otherwise specified." --C++11[namespace.std]
+//
+// We've checked that this particular definition has the intended behavior on
+// our implementations, but it's prone to breaking in the future, and please
+// don't imitate this in your own definitions without checking with some
+// standard library experts.
+namespace std {
+// These functions are provided as a convenience for logging, which is where we
+// use streams (it is against Google style to use streams in other places). It
+// is designed to allow you to emit non-ASCII Unicode strings to the log file,
+// which is normally ASCII. It is relatively slow, so try not to use it for
+// common cases. Non-ASCII characters will be converted to UTF-8 by these
+// operators.
+BASE_EXPORT std::ostream& operator<<(std::ostream& out, const wchar_t* wstr);
+inline std::ostream& operator<<(std::ostream& out, const std::wstring& wstr) {
+ return out << wstr.c_str();
+}
+} // namespace std
+
+// The NOTIMPLEMENTED() macro annotates codepaths which have not been
+// implemented yet. If output spam is a serious concern,
+// NOTIMPLEMENTED_LOG_ONCE can be used.
+
+#if defined(COMPILER_GCC)
+// On Linux, with GCC, we can use __PRETTY_FUNCTION__ to get the demangled name
+// of the current function in the NOTIMPLEMENTED message.
+#define NOTIMPLEMENTED_MSG "Not implemented reached in " << __PRETTY_FUNCTION__
+#else
+#define NOTIMPLEMENTED_MSG "NOT IMPLEMENTED"
+#endif
+
+#define NOTIMPLEMENTED() DLOG(ERROR) << NOTIMPLEMENTED_MSG
+#define NOTIMPLEMENTED_LOG_ONCE() \
+ do { \
+ static bool logged_once = false; \
+ DLOG_IF(ERROR, !logged_once) << NOTIMPLEMENTED_MSG; \
+ logged_once = true; \
+ } while (0); \
+ EAT_STREAM_PARAMETERS
+
+#endif // BASE_LOGGING_H_
diff --git a/security/sandbox/chromium/base/macros.h b/security/sandbox/chromium/base/macros.h
new file mode 100644
index 0000000000..c67bdbd987
--- /dev/null
+++ b/security/sandbox/chromium/base/macros.h
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains macros and macro-like constructs (e.g., templates) that
+// are commonly used throughout Chromium source. (It may also contain things
+// that are closely related to things that are commonly used that belong in this
+// file.)
+
+#ifndef BASE_MACROS_H_
+#define BASE_MACROS_H_
+
+// ALL DISALLOW_xxx MACROS ARE DEPRECATED; DO NOT USE IN NEW CODE.
+// Use explicit deletions instead. See the section on copyability/movability in
+// //styleguide/c++/c++-dos-and-donts.md for more information.
+
+// Put this in the declarations for a class to be uncopyable.
+#define DISALLOW_COPY(TypeName) \
+ TypeName(const TypeName&) = delete
+
+// Put this in the declarations for a class to be unassignable.
+#define DISALLOW_ASSIGN(TypeName) TypeName& operator=(const TypeName&) = delete
+
+// Put this in the declarations for a class to be uncopyable and unassignable.
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ DISALLOW_COPY(TypeName); \
+ DISALLOW_ASSIGN(TypeName)
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+// This is especially useful for classes containing only static methods.
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName() = delete; \
+ DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+// Used to explicitly mark the return value of a function as unused. If you are
+// really sure you don't want to do anything with the return value of a function
+// that has been marked WARN_UNUSED_RESULT, wrap it with this. Example:
+//
+// std::unique_ptr<MyType> my_var = ...;
+// if (TakeOwnership(my_var.get()) == SUCCESS)
+// ignore_result(my_var.release());
+//
+template<typename T>
+inline void ignore_result(const T&) {
+}
+
+#endif // BASE_MACROS_H_
diff --git a/security/sandbox/chromium/base/memory/aligned_memory.h b/security/sandbox/chromium/base/memory/aligned_memory.h
new file mode 100644
index 0000000000..a242b730be
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/aligned_memory.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
+#define BASE_MEMORY_ALIGNED_MEMORY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <type_traits>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+#include <malloc.h>
+#else
+#include <stdlib.h>
+#endif
+
+// A runtime sized aligned allocation can be created:
+//
+// float* my_array = static_cast<float*>(AlignedAlloc(size, alignment));
+//
+// // ... later, to release the memory:
+// AlignedFree(my_array);
+//
+// Or using unique_ptr:
+//
+// std::unique_ptr<float, AlignedFreeDeleter> my_array(
+// static_cast<float*>(AlignedAlloc(size, alignment)));
+
+namespace base {
+
+// This can be replaced with std::aligned_alloc when we have C++17.
+// Caveat: std::aligned_alloc requires the size parameter be an integral
+// multiple of alignment.
+BASE_EXPORT void* AlignedAlloc(size_t size, size_t alignment);
+
+inline void AlignedFree(void* ptr) {
+#if defined(COMPILER_MSVC)
+ _aligned_free(ptr);
+#else
+ free(ptr);
+#endif
+}
+
+// Deleter for use with unique_ptr. E.g., use as
+// std::unique_ptr<Foo, base::AlignedFreeDeleter> foo;
+struct AlignedFreeDeleter {
+ inline void operator()(void* ptr) const {
+ AlignedFree(ptr);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_ALIGNED_MEMORY_H_
diff --git a/security/sandbox/chromium/base/memory/free_deleter.h b/security/sandbox/chromium/base/memory/free_deleter.h
new file mode 100644
index 0000000000..5604118865
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/free_deleter.h
@@ -0,0 +1,25 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_FREE_DELETER_H_
+#define BASE_MEMORY_FREE_DELETER_H_
+
+#include <stdlib.h>
+
+namespace base {
+
+// Function object which invokes 'free' on its parameter, which must be
+// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
+//
+// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
+// static_cast<int*>(malloc(sizeof(int))));
+struct FreeDeleter {
+ inline void operator()(void* ptr) const {
+ free(ptr);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_FREE_DELETER_H_
diff --git a/security/sandbox/chromium/base/memory/platform_shared_memory_region.cc b/security/sandbox/chromium/base/memory/platform_shared_memory_region.cc
new file mode 100644
index 0000000000..45647925b3
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/platform_shared_memory_region.cc
@@ -0,0 +1,62 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include "base/memory/shared_memory_mapping.h"
+#include "base/numerics/checked_math.h"
+
+namespace base {
+namespace subtle {
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateWritable(
+ size_t size) {
+ return Create(Mode::kWritable, size);
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateUnsafe(
+ size_t size) {
+ return Create(Mode::kUnsafe, size);
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion() = default;
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+ PlatformSharedMemoryRegion&& other) = default;
+PlatformSharedMemoryRegion& PlatformSharedMemoryRegion::operator=(
+ PlatformSharedMemoryRegion&& other) = default;
+PlatformSharedMemoryRegion::~PlatformSharedMemoryRegion() = default;
+
+PlatformSharedMemoryRegion::ScopedPlatformHandle
+PlatformSharedMemoryRegion::PassPlatformHandle() {
+ return std::move(handle_);
+}
+
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+ size_t size,
+ void** memory,
+ size_t* mapped_size) const {
+ if (!IsValid())
+ return false;
+
+ if (size == 0)
+ return false;
+
+ size_t end_byte;
+ if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+ return false;
+ }
+
+ bool success = MapAtInternal(offset, size, memory, mapped_size);
+ if (success) {
+ DCHECK_EQ(
+ 0U, reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+ }
+
+ return success;
+}
+
+} // namespace subtle
+} // namespace base
diff --git a/security/sandbox/chromium/base/memory/platform_shared_memory_region.h b/security/sandbox/chromium/base/memory/platform_shared_memory_region.h
new file mode 100644
index 0000000000..220cbdd65e
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/platform_shared_memory_region.h
@@ -0,0 +1,301 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
+
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include "base/mac/scoped_mach_port.h"
+#elif defined(OS_FUCHSIA)
+#include <lib/zx/vmo.h>
+#elif defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX)
+#include <sys/types.h>
+#include "base/file_descriptor_posix.h"
+#include "base/files/scoped_file.h"
+#endif
+
+#if defined(OS_LINUX)
+namespace content {
+class SandboxIPCHandler;
+}
+#endif
+
+namespace base {
+namespace subtle {
+
+#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
+ !defined(OS_ANDROID)
+// Helper structs to keep two descriptors on POSIX. It's needed to support
+// ConvertToReadOnly().
+struct BASE_EXPORT FDPair {
+ // The main shared memory descriptor that is used for mapping. May be either
+ // writable or read-only, depending on region's mode.
+ int fd;
+ // The read-only descriptor, valid only in kWritable mode. Replaces |fd| when
+ // a region is converted to read-only.
+ int readonly_fd;
+};
+
+struct BASE_EXPORT ScopedFDPair {
+ ScopedFDPair();
+ ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd);
+ ScopedFDPair(ScopedFDPair&&);
+ ScopedFDPair& operator=(ScopedFDPair&&);
+ ~ScopedFDPair();
+
+ FDPair get() const;
+
+ ScopedFD fd;
+ ScopedFD readonly_fd;
+};
+#endif
+
+// Implementation class for shared memory regions.
+//
+// This class does the following:
+//
+// - Wraps and owns a shared memory region platform handle.
+// - Provides a way to allocate a new region of platform shared memory of given
+// size.
+// - Provides a way to create mapping of the region in the current process'
+// address space, under special access-control constraints (see Mode).
+// - Provides methods to help transferring the handle across process boundaries.
+// - Holds a 128-bit unique identifier used to uniquely identify the same
+// kernel region resource across processes (used for memory tracking).
+// - Has a method to retrieve the region's size in bytes.
+//
+// IMPORTANT NOTE: Users should never use this directly, but
+// ReadOnlySharedMemoryRegion, WritableSharedMemoryRegion or
+// UnsafeSharedMemoryRegion since this is an implementation class.
+class BASE_EXPORT PlatformSharedMemoryRegion {
+ public:
+ // Permission mode of the platform handle. Each mode corresponds to one of the
+ // typed shared memory classes:
+ //
+ // * ReadOnlySharedMemoryRegion: A region that can only create read-only
+ // mappings.
+ //
+ // * WritableSharedMemoryRegion: A region that can only create writable
+ // mappings. The region can be demoted to ReadOnlySharedMemoryRegion without
+ // the possibility of promoting back to writable.
+ //
+ // * UnsafeSharedMemoryRegion: A region that can only create writable
+ // mappings. The region cannot be demoted to ReadOnlySharedMemoryRegion.
+ enum class Mode {
+ kReadOnly, // ReadOnlySharedMemoryRegion
+ kWritable, // WritableSharedMemoryRegion
+ kUnsafe, // UnsafeSharedMemoryRegion
+ kMaxValue = kUnsafe
+ };
+
+ // Errors that can occur during Shared Memory construction.
+ // These match tools/metrics/histograms/enums.xml.
+ // This enum is append-only.
+ enum class CreateError {
+ SUCCESS = 0,
+ SIZE_ZERO = 1,
+ SIZE_TOO_LARGE = 2,
+ INITIALIZE_ACL_FAILURE = 3,
+ INITIALIZE_SECURITY_DESC_FAILURE = 4,
+ SET_SECURITY_DESC_FAILURE = 5,
+ CREATE_FILE_MAPPING_FAILURE = 6,
+ REDUCE_PERMISSIONS_FAILURE = 7,
+ ALREADY_EXISTS = 8,
+ ALLOCATE_FILE_REGION_FAILURE = 9,
+ FSTAT_FAILURE = 10,
+ INODES_MISMATCH = 11,
+ GET_SHMEM_TEMP_DIR_FAILURE = 12,
+ kMaxValue = GET_SHMEM_TEMP_DIR_FAILURE
+ };
+
+#if defined(OS_LINUX)
+ // Structure to limit access to executable region creation.
+ struct ExecutableRegion {
+ private:
+ // Creates a new shared memory region the unsafe mode (writable and not and
+ // convertible to read-only), and in addition marked executable. A ScopedFD
+ // to this region is returned. Any any mapping will have to be done
+ // manually, including setting executable permissions if necessary
+ //
+ // This is only used to support sandbox_ipc_linux.cc, and should not be used
+ // anywhere else in chrome. This is restricted via AllowCreateExecutable.
+ // TODO(crbug.com/982879): remove this when NaCl is unshipped.
+ //
+ // Returns an invalid ScopedFD if the call fails.
+ static ScopedFD CreateFD(size_t size);
+
+ friend class content::SandboxIPCHandler;
+ };
+#endif
+
+// Platform-specific shared memory type used by this class.
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ using PlatformHandle = mach_port_t;
+ using ScopedPlatformHandle = mac::ScopedMachSendRight;
+#elif defined(OS_FUCHSIA)
+ using PlatformHandle = zx::unowned_vmo;
+ using ScopedPlatformHandle = zx::vmo;
+#elif defined(OS_WIN)
+ using PlatformHandle = HANDLE;
+ using ScopedPlatformHandle = win::ScopedHandle;
+#elif defined(OS_ANDROID)
+ using PlatformHandle = int;
+ using ScopedPlatformHandle = ScopedFD;
+#else
+ using PlatformHandle = FDPair;
+ using ScopedPlatformHandle = ScopedFDPair;
+#endif
+
+ // The minimum alignment in bytes that any mapped address produced by Map()
+ // and MapAt() is guaranteed to have.
+ enum { kMapMinimumAlignment = 32 };
+
+ // Creates a new PlatformSharedMemoryRegion with corresponding mode and size.
+ // Creating in kReadOnly mode isn't supported because then there will be no
+ // way to modify memory content.
+ static PlatformSharedMemoryRegion CreateWritable(size_t size);
+ static PlatformSharedMemoryRegion CreateUnsafe(size_t size);
+
+ // Returns a new PlatformSharedMemoryRegion that takes ownership of the
+ // |handle|. All parameters must be taken from another valid
+ // PlatformSharedMemoryRegion instance, e.g. |size| must be equal to the
+ // actual region size as allocated by the kernel.
+ // Closes the |handle| and returns an invalid instance if passed parameters
+ // are invalid.
+ static PlatformSharedMemoryRegion Take(ScopedPlatformHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid);
+#if defined(OS_POSIX) && !defined(OS_ANDROID) && \
+ !(defined(OS_MACOSX) && !defined(OS_IOS))
+ // Specialized version of Take() for POSIX that takes only one file descriptor
+ // instead of pair. Cannot be used with kWritable |mode|.
+ static PlatformSharedMemoryRegion Take(ScopedFD handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid);
+#endif
+
+ // Default constructor initializes an invalid instance, i.e. an instance that
+ // doesn't wrap any valid platform handle.
+ PlatformSharedMemoryRegion();
+
+ // Move operations are allowed.
+ PlatformSharedMemoryRegion(PlatformSharedMemoryRegion&&);
+ PlatformSharedMemoryRegion& operator=(PlatformSharedMemoryRegion&&);
+
+ // Destructor closes the platform handle. Does nothing if the handle is
+ // invalid.
+ ~PlatformSharedMemoryRegion();
+
+ // Passes ownership of the platform handle to the caller. The current instance
+ // becomes invalid. It's the responsibility of the caller to close the
+ // handle. If the current instance is invalid, ScopedPlatformHandle will also
+ // be invalid.
+ ScopedPlatformHandle PassPlatformHandle() WARN_UNUSED_RESULT;
+
+ // Returns the platform handle. The current instance keeps ownership of this
+ // handle.
+ PlatformHandle GetPlatformHandle() const;
+
+ // Whether the platform handle is valid.
+ bool IsValid() const;
+
+ // Duplicates the platform handle and creates a new PlatformSharedMemoryRegion
+ // with the same |mode_|, |size_| and |guid_| that owns this handle. Returns
+ // invalid region on failure, the current instance remains valid.
+ // Can be called only in kReadOnly and kUnsafe modes, CHECK-fails if is
+ // called in kWritable mode.
+ PlatformSharedMemoryRegion Duplicate() const;
+
+ // Converts the region to read-only. Returns whether the operation succeeded.
+ // Makes the current instance invalid on failure. Can be called only in
+ // kWritable mode, all other modes will CHECK-fail. The object will have
+ // kReadOnly mode after this call on success.
+ bool ConvertToReadOnly();
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // Same as above, but |mapped_addr| is used as a hint to avoid additional
+ // mapping of the memory object.
+ // |mapped_addr| must be mapped location of |memory_object_|. If the location
+ // is unknown, |mapped_addr| should be |nullptr|.
+ bool ConvertToReadOnly(void* mapped_addr);
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
+ // Converts the region to unsafe. Returns whether the operation succeeded.
+ // Makes the current instance invalid on failure. Can be called only in
+ // kWritable mode, all other modes will CHECK-fail. The object will have
+ // kUnsafe mode after this call on success.
+ bool ConvertToUnsafe();
+
+ // Maps |size| bytes of the shared memory region starting with the given
+ // |offset| into the caller's address space. |offset| must be aligned to value
+ // of |SysInfo::VMAllocationGranularity()|. Fails if requested bytes are out
+ // of the region limits.
+ // Returns true and sets |memory| and |mapped_size| on success, returns false
+ // and leaves output parameters in unspecified state otherwise. The mapped
+ // address is guaranteed to have an alignment of at least
+ // |kMapMinimumAlignment|.
+ bool MapAt(off_t offset,
+ size_t size,
+ void** memory,
+ size_t* mapped_size) const;
+
+ const UnguessableToken& GetGUID() const { return guid_; }
+
+ size_t GetSize() const { return size_; }
+
+ Mode GetMode() const { return mode_; }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
+ CreateReadOnlyRegionDeathTest);
+ FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
+ CheckPlatformHandlePermissionsCorrespondToMode);
+ static PlatformSharedMemoryRegion Create(Mode mode,
+ size_t size
+#if defined(OS_LINUX)
+ ,
+ bool executable = false
+#endif
+ );
+
+ static bool CheckPlatformHandlePermissionsCorrespondToMode(
+ PlatformHandle handle,
+ Mode mode,
+ size_t size);
+
+ PlatformSharedMemoryRegion(ScopedPlatformHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid);
+
+ bool MapAtInternal(off_t offset,
+ size_t size,
+ void** memory,
+ size_t* mapped_size) const;
+
+ ScopedPlatformHandle handle_;
+ Mode mode_ = Mode::kReadOnly;
+ size_t size_ = 0;
+ UnguessableToken guid_;
+
+ DISALLOW_COPY_AND_ASSIGN(PlatformSharedMemoryRegion);
+};
+
+} // namespace subtle
+} // namespace base
+
+#endif // BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
diff --git a/security/sandbox/chromium/base/memory/platform_shared_memory_region_win.cc b/security/sandbox/chromium/base/memory/platform_shared_memory_region_win.cc
new file mode 100644
index 0000000000..c2f3704f91
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/platform_shared_memory_region_win.cc
@@ -0,0 +1,343 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <aclapi.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/bits.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+namespace subtle {
+
+namespace {
+
+// Emits UMA metrics about encountered errors. Pass zero (0) for |winerror|
+// if there is no associated Windows error.
+void LogError(PlatformSharedMemoryRegion::CreateError error, DWORD winerror) {
+ UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error);
+ static_assert(ERROR_SUCCESS == 0, "Windows error code changed!");
+ if (winerror != ERROR_SUCCESS)
+ UmaHistogramSparse("SharedMemory.CreateWinError", winerror);
+}
+
+typedef enum _SECTION_INFORMATION_CLASS {
+ SectionBasicInformation,
+} SECTION_INFORMATION_CLASS;
+
+typedef struct _SECTION_BASIC_INFORMATION {
+ PVOID BaseAddress;
+ ULONG Attributes;
+ LARGE_INTEGER Size;
+} SECTION_BASIC_INFORMATION, *PSECTION_BASIC_INFORMATION;
+
+typedef ULONG(__stdcall* NtQuerySectionType)(
+ HANDLE SectionHandle,
+ SECTION_INFORMATION_CLASS SectionInformationClass,
+ PVOID SectionInformation,
+ ULONG SectionInformationLength,
+ PULONG ResultLength);
+
+// Returns the length of the memory section starting at the supplied address.
+size_t GetMemorySectionSize(void* address) {
+ MEMORY_BASIC_INFORMATION memory_info;
+ if (!::VirtualQuery(address, &memory_info, sizeof(memory_info)))
+ return 0;
+ return memory_info.RegionSize -
+ (static_cast<char*>(address) -
+ static_cast<char*>(memory_info.AllocationBase));
+}
+
+// Checks if the section object is safe to map. At the moment this just means
+// it's not an image section.
+bool IsSectionSafeToMap(HANDLE handle) {
+ static NtQuerySectionType nt_query_section_func =
+ reinterpret_cast<NtQuerySectionType>(
+ ::GetProcAddress(::GetModuleHandle(L"ntdll.dll"), "NtQuerySection"));
+ DCHECK(nt_query_section_func);
+
+ // The handle must have SECTION_QUERY access for this to succeed.
+ SECTION_BASIC_INFORMATION basic_information = {};
+ ULONG status =
+ nt_query_section_func(handle, SectionBasicInformation, &basic_information,
+ sizeof(basic_information), nullptr);
+ if (status)
+ return false;
+ return (basic_information.Attributes & SEC_IMAGE) != SEC_IMAGE;
+}
+
+// Returns a HANDLE on success and |nullptr| on failure.
+// This function is similar to CreateFileMapping, but removes the permissions
+// WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE.
+//
+// A newly created file mapping has two sets of permissions. It has access
+// control permissions (WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE) and
+// file permissions (FILE_MAP_READ, FILE_MAP_WRITE, etc.). The Chrome sandbox
+// prevents HANDLEs with the WRITE_DAC permission from being duplicated into
+// unprivileged processes.
+//
+// In order to remove the access control permissions, after being created the
+// handle is duplicated with only the file access permissions.
+HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
+ size_t rounded_size,
+ LPCWSTR name) {
+ HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
+ static_cast<DWORD>(rounded_size), name);
+ if (!h) {
+ LogError(
+ PlatformSharedMemoryRegion::CreateError::CREATE_FILE_MAPPING_FAILURE,
+ GetLastError());
+ return nullptr;
+ }
+
+ HANDLE h2;
+ ProcessHandle process = GetCurrentProcess();
+ BOOL success = ::DuplicateHandle(
+ process, h, process, &h2, FILE_MAP_READ | FILE_MAP_WRITE | SECTION_QUERY,
+ FALSE, 0);
+ BOOL rv = ::CloseHandle(h);
+ DCHECK(rv);
+
+ if (!success) {
+ LogError(
+ PlatformSharedMemoryRegion::CreateError::REDUCE_PERMISSIONS_FAILURE,
+ GetLastError());
+ return nullptr;
+ }
+
+ return h2;
+}
+
+} // namespace
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+ win::ScopedHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid) {
+ if (!handle.IsValid())
+ return {};
+
+ if (size == 0)
+ return {};
+
+ if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+ return {};
+
+ if (!IsSectionSafeToMap(handle.Get()))
+ return {};
+
+ CHECK(
+ CheckPlatformHandlePermissionsCorrespondToMode(handle.Get(), mode, size));
+
+ return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+HANDLE PlatformSharedMemoryRegion::GetPlatformHandle() const {
+ return handle_.Get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+ return handle_.IsValid();
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+ if (!IsValid())
+ return {};
+
+ CHECK_NE(mode_, Mode::kWritable)
+ << "Duplicating a writable shared memory region is prohibited";
+
+ HANDLE duped_handle;
+ ProcessHandle process = GetCurrentProcess();
+ BOOL success =
+ ::DuplicateHandle(process, handle_.Get(), process, &duped_handle, 0,
+ FALSE, DUPLICATE_SAME_ACCESS);
+ if (!success)
+ return {};
+
+ return PlatformSharedMemoryRegion(win::ScopedHandle(duped_handle), mode_,
+ size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+ if (!IsValid())
+ return false;
+
+ CHECK_EQ(mode_, Mode::kWritable)
+ << "Only writable shared memory region can be converted to read-only";
+
+ win::ScopedHandle handle_copy(handle_.Take());
+
+ HANDLE duped_handle;
+ ProcessHandle process = GetCurrentProcess();
+ BOOL success =
+ ::DuplicateHandle(process, handle_copy.Get(), process, &duped_handle,
+ FILE_MAP_READ | SECTION_QUERY, FALSE, 0);
+ if (!success)
+ return false;
+
+ handle_.Set(duped_handle);
+ mode_ = Mode::kReadOnly;
+ return true;
+}
+
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+ if (!IsValid())
+ return false;
+
+ CHECK_EQ(mode_, Mode::kWritable)
+ << "Only writable shared memory region can be converted to unsafe";
+
+ mode_ = Mode::kUnsafe;
+ return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
+ size_t size,
+ void** memory,
+ size_t* mapped_size) const {
+ bool write_allowed = mode_ != Mode::kReadOnly;
+ // Try to map the shared memory. On the first failure, release any reserved
+ // address space for a single entry.
+ for (int i = 0; i < 2; ++i) {
+ *memory = MapViewOfFile(
+ handle_.Get(), FILE_MAP_READ | (write_allowed ? FILE_MAP_WRITE : 0),
+ static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), size);
+ if (*memory)
+ break;
+ ReleaseReservation();
+ }
+ if (!*memory) {
+ DPLOG(ERROR) << "Failed executing MapViewOfFile";
+ return false;
+ }
+
+ *mapped_size = GetMemorySectionSize(*memory);
+ return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+ size_t size) {
+ // TODO(crbug.com/210609): NaCl forces us to round up 64k here, wasting 32k
+ // per mapping on average.
+ static const size_t kSectionSize = 65536;
+ if (size == 0) {
+ LogError(CreateError::SIZE_ZERO, 0);
+ return {};
+ }
+
+ // Aligning may overflow so check that the result doesn't decrease.
+ size_t rounded_size = bits::Align(size, kSectionSize);
+ if (rounded_size < size ||
+ rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
+ LogError(CreateError::SIZE_TOO_LARGE, 0);
+ return {};
+ }
+
+ CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+ "lead to this region being non-modifiable";
+
+ // Add an empty DACL to enforce anonymous read-only sections.
+ ACL dacl;
+ SECURITY_DESCRIPTOR sd;
+ if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
+ LogError(CreateError::INITIALIZE_ACL_FAILURE, GetLastError());
+ return {};
+ }
+ if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
+ LogError(CreateError::INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
+ return {};
+ }
+ if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
+ LogError(CreateError::SET_SECURITY_DESC_FAILURE, GetLastError());
+ return {};
+ }
+
+ string16 name;
+ if (win::GetVersion() < win::Version::WIN8_1) {
+ // Windows < 8.1 ignores DACLs on certain unnamed objects (like shared
+ // sections). So, we generate a random name when we need to enforce
+ // read-only.
+ uint64_t rand_values[4];
+ RandBytes(&rand_values, sizeof(rand_values));
+ name = ASCIIToUTF16(StringPrintf("CrSharedMem_%016llx%016llx%016llx%016llx",
+ rand_values[0], rand_values[1],
+ rand_values[2], rand_values[3]));
+ DCHECK(!name.empty());
+ }
+
+ SECURITY_ATTRIBUTES sa = {sizeof(sa), &sd, FALSE};
+ // Ask for the file mapping with reduced permisions to avoid passing the
+ // access control permissions granted by default into unpriviledged process.
+ HANDLE h = CreateFileMappingWithReducedPermissions(
+ &sa, rounded_size, name.empty() ? nullptr : as_wcstr(name));
+ if (h == nullptr) {
+ // The error is logged within CreateFileMappingWithReducedPermissions().
+ return {};
+ }
+
+ win::ScopedHandle scoped_h(h);
+ // Check if the shared memory pre-exists.
+ if (GetLastError() == ERROR_ALREADY_EXISTS) {
+ LogError(CreateError::ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
+ return {};
+ }
+
+ LogError(CreateError::SUCCESS, ERROR_SUCCESS);
+ return PlatformSharedMemoryRegion(std::move(scoped_h), mode, size,
+ UnguessableToken::Create());
+}
+
+// static
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+ PlatformHandle handle,
+ Mode mode,
+ size_t size) {
+ // Call ::DuplicateHandle() with FILE_MAP_WRITE as a desired access to check
+ // if the |handle| has a write access.
+ ProcessHandle process = GetCurrentProcess();
+ HANDLE duped_handle;
+ BOOL success = ::DuplicateHandle(process, handle, process, &duped_handle,
+ FILE_MAP_WRITE, FALSE, 0);
+ if (success) {
+ BOOL rv = ::CloseHandle(duped_handle);
+ DCHECK(rv);
+ }
+
+ bool is_read_only = !success;
+ bool expected_read_only = mode == Mode::kReadOnly;
+
+ if (is_read_only != expected_read_only) {
+ DLOG(ERROR) << "File mapping handle has wrong access rights: it is"
+ << (is_read_only ? " " : " not ") << "read-only but it should"
+ << (expected_read_only ? " " : " not ") << "be";
+ return false;
+ }
+
+ return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+ win::ScopedHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid)
+ : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+} // namespace subtle
+} // namespace base
diff --git a/security/sandbox/chromium/base/memory/ptr_util.h b/security/sandbox/chromium/base/memory/ptr_util.h
new file mode 100644
index 0000000000..42f4f49eeb
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/ptr_util.h
@@ -0,0 +1,23 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PTR_UTIL_H_
+#define BASE_MEMORY_PTR_UTIL_H_
+
+#include <memory>
+#include <utility>
+
+namespace base {
+
+// Helper to transfer ownership of a raw pointer to a std::unique_ptr<T>.
+// Note that std::unique_ptr<T> has very different semantics from
+// std::unique_ptr<T[]>: do not use this helper for array allocations.
+template <typename T>
+std::unique_ptr<T> WrapUnique(T* ptr) {
+ return std::unique_ptr<T>(ptr);
+}
+
+} // namespace base
+
+#endif // BASE_MEMORY_PTR_UTIL_H_
diff --git a/security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h b/security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h
new file mode 100644
index 0000000000..ab8b2abcbb
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
+#define BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
+
+#include <type_traits>
+
+#include "base/template_util.h"
+
+// It is dangerous to post a task with a T* argument where T is a subtype of
+// RefCounted(Base|ThreadSafeBase), since by the time the parameter is used, the
+// object may already have been deleted since it was not held with a
+// scoped_refptr. Example: http://crbug.com/27191
+// The following set of traits are designed to generate a compile error
+// whenever this antipattern is attempted.
+
+namespace base {
+
+// This is a base internal implementation file used by task.h and callback.h.
+// Not for public consumption, so we wrap it in namespace internal.
+namespace internal {
+
+template <typename T, typename = void>
+struct IsRefCountedType : std::false_type {};
+
+template <typename T>
+struct IsRefCountedType<T,
+ void_t<decltype(std::declval<T*>()->AddRef()),
+ decltype(std::declval<T*>()->Release())>>
+ : std::true_type {};
+
+template <typename T>
+struct NeedsScopedRefptrButGetsRawPtr {
+ static_assert(!std::is_reference<T>::value,
+ "NeedsScopedRefptrButGetsRawPtr requires non-reference type.");
+
+ enum {
+ // Human readable translation: you needed to be a scoped_refptr if you are a
+ // raw pointer type and are convertible to a RefCounted(Base|ThreadSafeBase)
+ // type.
+ value = std::is_pointer<T>::value &&
+ IsRefCountedType<std::remove_pointer_t<T>>::value
+ };
+};
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
diff --git a/security/sandbox/chromium/base/memory/ref_counted.cc b/security/sandbox/chromium/base/memory/ref_counted.cc
new file mode 100644
index 0000000000..0a8d32ebf0
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/ref_counted.cc
@@ -0,0 +1,105 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+
+#include <limits>
+#include <type_traits>
+
+#include "base/threading/thread_collision_warner.h"
+
+namespace base {
+namespace {
+
+#if DCHECK_IS_ON()
+std::atomic_int g_cross_thread_ref_count_access_allow_count(0);
+#endif
+
+} // namespace
+
+namespace subtle {
+
+bool RefCountedThreadSafeBase::HasOneRef() const {
+ return ref_count_.IsOne();
+}
+
+bool RefCountedThreadSafeBase::HasAtLeastOneRef() const {
+ return !ref_count_.IsZero();
+}
+
+#if DCHECK_IS_ON()
+RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
+ DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
+ "calling Release()";
+}
+#endif
+
+// For security and correctness, we check the arithmetic on ref counts.
+//
+// In an attempt to avoid binary bloat (from inlining the `CHECK`), we define
+// these functions out-of-line. However, compilers are wily. Further testing may
+// show that `NOINLINE` helps or hurts.
+//
+#if defined(ARCH_CPU_64_BITS)
+void RefCountedBase::AddRefImpl() const {
+ // An attacker could induce use-after-free bugs, and potentially exploit them,
+ // by creating so many references to a ref-counted object that the reference
+ // count overflows. On 32-bit architectures, there is not enough address space
+ // to succeed. But on 64-bit architectures, it might indeed be possible.
+ // Therefore, we can elide the check for arithmetic overflow on 32-bit, but we
+ // must check on 64-bit.
+ //
+ // Make sure the addition didn't wrap back around to 0. This form of check
+ // works because we assert that `ref_count_` is an unsigned integer type.
+ CHECK(++ref_count_ != 0);
+}
+
+void RefCountedBase::ReleaseImpl() const {
+ // Make sure the subtraction didn't wrap back around from 0 to the max value.
+ // That could cause memory leaks, and may induce application-semantic
+ // correctness or safety bugs. (E.g. what if we really needed that object to
+ // be destroyed at the right time?)
+ //
+ // Note that unlike with overflow, underflow could also happen on 32-bit
+  // architectures. Arguably, we should do this check on 32-bit machines too.
+ CHECK(--ref_count_ != std::numeric_limits<decltype(ref_count_)>::max());
+}
+#endif
+
+#if !defined(ARCH_CPU_X86_FAMILY)
+bool RefCountedThreadSafeBase::Release() const {
+ return ReleaseImpl();
+}
+void RefCountedThreadSafeBase::AddRef() const {
+ AddRefImpl();
+}
+void RefCountedThreadSafeBase::AddRefWithCheck() const {
+ AddRefWithCheckImpl();
+}
+#endif
+
+#if DCHECK_IS_ON()
+bool RefCountedBase::CalledOnValidSequence() const {
+#if defined(MOZ_SANDBOX)
+ return true;
+#else
+ return sequence_checker_.CalledOnValidSequence() ||
+ g_cross_thread_ref_count_access_allow_count.load() != 0;
+#endif
+}
+#endif
+
+} // namespace subtle
+
+#if DCHECK_IS_ON()
+ScopedAllowCrossThreadRefCountAccess::ScopedAllowCrossThreadRefCountAccess() {
+ ++g_cross_thread_ref_count_access_allow_count;
+}
+
+ScopedAllowCrossThreadRefCountAccess::~ScopedAllowCrossThreadRefCountAccess() {
+ --g_cross_thread_ref_count_access_allow_count;
+}
+#endif
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/memory/ref_counted.h b/security/sandbox/chromium/base/memory/ref_counted.h
new file mode 100644
index 0000000000..ac7183a49d
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/ref_counted.h
@@ -0,0 +1,463 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_H_
+#define BASE_MEMORY_REF_COUNTED_H_
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/atomic_ref_count.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/sequence_checker.h"
+#include "base/threading/thread_collision_warner.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace subtle {
+
+class BASE_EXPORT RefCountedBase {
+ public:
+ bool HasOneRef() const { return ref_count_ == 1; }
+ bool HasAtLeastOneRef() const { return ref_count_ >= 1; }
+
+ protected:
+ explicit RefCountedBase(StartRefCountFromZeroTag) {
+#if DCHECK_IS_ON()
+ sequence_checker_.DetachFromSequence();
+#endif
+ }
+
+ explicit RefCountedBase(StartRefCountFromOneTag) : ref_count_(1) {
+#if DCHECK_IS_ON()
+ needs_adopt_ref_ = true;
+ sequence_checker_.DetachFromSequence();
+#endif
+ }
+
+ ~RefCountedBase() {
+#if DCHECK_IS_ON()
+ DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
+#endif
+ }
+
+ void AddRef() const {
+ // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+ // Current thread books the critical section "AddRelease"
+    // without releasing it.
+ // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+#if DCHECK_IS_ON()
+ DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such a object has to be made by AdoptRef or"
+ << " MakeRefCounted.";
+ if (ref_count_ >= 1) {
+ DCHECK(CalledOnValidSequence());
+ }
+#endif
+
+ AddRefImpl();
+ }
+
+ // Returns true if the object should self-delete.
+ bool Release() const {
+ ReleaseImpl();
+
+ // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+ // Current thread books the critical section "AddRelease"
+    // without releasing it.
+ // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+
+#if DCHECK_IS_ON()
+ DCHECK(!in_dtor_);
+ if (ref_count_ == 0)
+ in_dtor_ = true;
+
+ if (ref_count_ >= 1)
+ DCHECK(CalledOnValidSequence());
+ if (ref_count_ == 1)
+ sequence_checker_.DetachFromSequence();
+#endif
+
+ return ref_count_ == 0;
+ }
+
+ // Returns true if it is safe to read or write the object, from a thread
+ // safety standpoint. Should be DCHECK'd from the methods of RefCounted
+ // classes if there is a danger of objects being shared across threads.
+ //
+ // This produces fewer false positives than adding a separate SequenceChecker
+ // into the subclass, because it automatically detaches from the sequence when
+ // the reference count is 1 (and never fails if there is only one reference).
+ //
+ // This means unlike a separate SequenceChecker, it will permit a singly
+ // referenced object to be passed between threads (not holding a reference on
+ // the sending thread), but will trap if the sending thread holds onto a
+ // reference, or if the object is accessed from multiple threads
+ // simultaneously.
+ bool IsOnValidSequence() const {
+#if DCHECK_IS_ON()
+ return ref_count_ <= 1 || CalledOnValidSequence();
+#else
+ return true;
+#endif
+ }
+
+ private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ FRIEND_TEST_ALL_PREFIXES(RefCountedDeathTest, TestOverflowCheck);
+
+ void Adopted() const {
+#if DCHECK_IS_ON()
+ DCHECK(needs_adopt_ref_);
+ needs_adopt_ref_ = false;
+#endif
+ }
+
+#if defined(ARCH_CPU_64_BITS)
+ void AddRefImpl() const;
+ void ReleaseImpl() const;
+#else
+ void AddRefImpl() const { ++ref_count_; }
+ void ReleaseImpl() const { --ref_count_; }
+#endif
+
+#if DCHECK_IS_ON()
+ bool CalledOnValidSequence() const;
+#endif
+
+ mutable uint32_t ref_count_ = 0;
+ static_assert(std::is_unsigned<decltype(ref_count_)>::value,
+ "ref_count_ must be an unsigned type.");
+
+#if DCHECK_IS_ON()
+ mutable bool needs_adopt_ref_ = false;
+ mutable bool in_dtor_ = false;
+ mutable SequenceChecker sequence_checker_;
+#endif
+
+ DFAKE_MUTEX(add_release_);
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedBase);
+};
+
+class BASE_EXPORT RefCountedThreadSafeBase {
+ public:
+ bool HasOneRef() const;
+ bool HasAtLeastOneRef() const;
+
+ protected:
+ explicit constexpr RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
+ explicit constexpr RefCountedThreadSafeBase(StartRefCountFromOneTag)
+ : ref_count_(1) {
+#if DCHECK_IS_ON()
+ needs_adopt_ref_ = true;
+#endif
+ }
+
+#if DCHECK_IS_ON()
+ ~RefCountedThreadSafeBase();
+#else
+ ~RefCountedThreadSafeBase() = default;
+#endif
+
+// Release and AddRef are suitable for inlining on X86 because they generate
+// very small code sequences. On other platforms (ARM), it causes a size
+// regression and is probably not worth it.
+#if defined(ARCH_CPU_X86_FAMILY)
+ // Returns true if the object should self-delete.
+ bool Release() const { return ReleaseImpl(); }
+ void AddRef() const { AddRefImpl(); }
+ void AddRefWithCheck() const { AddRefWithCheckImpl(); }
+#else
+ // Returns true if the object should self-delete.
+ bool Release() const;
+ void AddRef() const;
+ void AddRefWithCheck() const;
+#endif
+
+ private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ void Adopted() const {
+#if DCHECK_IS_ON()
+ DCHECK(needs_adopt_ref_);
+ needs_adopt_ref_ = false;
+#endif
+ }
+
+ ALWAYS_INLINE void AddRefImpl() const {
+#if DCHECK_IS_ON()
+ DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such a object has to be made by AdoptRef or"
+ << " MakeRefCounted.";
+#endif
+ ref_count_.Increment();
+ }
+
+ ALWAYS_INLINE void AddRefWithCheckImpl() const {
+#if DCHECK_IS_ON()
+ DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such a object has to be made by AdoptRef or"
+ << " MakeRefCounted.";
+#endif
+ CHECK(ref_count_.Increment() > 0);
+ }
+
+ ALWAYS_INLINE bool ReleaseImpl() const {
+#if DCHECK_IS_ON()
+ DCHECK(!in_dtor_);
+ DCHECK(!ref_count_.IsZero());
+#endif
+ if (!ref_count_.Decrement()) {
+#if DCHECK_IS_ON()
+ in_dtor_ = true;
+#endif
+ return true;
+ }
+ return false;
+ }
+
+ mutable AtomicRefCount ref_count_{0};
+#if DCHECK_IS_ON()
+ mutable bool needs_adopt_ref_ = false;
+ mutable bool in_dtor_ = false;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase);
+};
+
+} // namespace subtle
+
+// ScopedAllowCrossThreadRefCountAccess disables the check documented on
+// RefCounted below for rare pre-existing use cases where thread-safety was
+// guaranteed through other means (e.g. explicit sequencing of calls across
+// execution sequences when bouncing between threads in order). New callers
+// should refrain from using this (callsites handling thread-safety through
+// locks should use RefCountedThreadSafe per the overhead of its atomics being
+// negligible compared to locks anyway and callsites doing explicit sequencing
+// should properly std::move() the ref to avoid hitting this check).
+// TODO(tzik): Cleanup existing use cases and remove
+// ScopedAllowCrossThreadRefCountAccess.
+class BASE_EXPORT ScopedAllowCrossThreadRefCountAccess final {
+ public:
+#if DCHECK_IS_ON()
+ ScopedAllowCrossThreadRefCountAccess();
+ ~ScopedAllowCrossThreadRefCountAccess();
+#else
+ ScopedAllowCrossThreadRefCountAccess() {}
+ ~ScopedAllowCrossThreadRefCountAccess() {}
+#endif
+};
+
+//
+// A base class for reference counted classes. Otherwise, known as a cheap
+// knock-off of WebKit's RefCounted<T> class. To use this, just extend your
+// class from it like so:
+//
+// class MyFoo : public base::RefCounted<MyFoo> {
+// ...
+// private:
+// friend class base::RefCounted<MyFoo>;
+// ~MyFoo();
+// };
+//
+// You should always make your destructor non-public, to avoid any code deleting
+// the object accidentally while there are references to it.
+//
+//
+// The ref count manipulation to RefCounted is NOT thread safe and has DCHECKs
+// to trap unsafe cross thread usage. A subclass instance of RefCounted can be
+// passed to another execution sequence only when its ref count is 1. If the ref
+// count is more than 1, the RefCounted class verifies the ref updates are made
+// on the same execution sequence as the previous ones. The subclass can also
+// manually call IsOnValidSequence to trap other non-thread-safe accesses; see
+// the documentation for that method.
+//
+//
+// The reference count starts from zero by default, and we intend to migrate
+// to start-from-one ref count. Put REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() to
+// the ref counted class to opt-in.
+//
+// If an object has start-from-one ref count, the first scoped_refptr needs to be
+// created by base::AdoptRef() or base::MakeRefCounted(). We can use
+// base::MakeRefCounted() to create both types of ref counted objects.
+//
+// The motivations to use start-from-one ref count are:
+// - Start-from-one ref count doesn't need the ref count increment for the
+// first reference.
+// - It can detect an invalid object acquisition for a being-deleted object
+// that has zero ref count. That tends to happen on custom deleter that
+// delays the deletion.
+// TODO(tzik): Implement invalid acquisition detection.
+// - Behavior parity to Blink's WTF::RefCounted, whose count starts from one.
+// And start-from-one ref count is a step to merge WTF::RefCounted into
+// base::RefCounted.
+//
+#define REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() \
+ static constexpr ::base::subtle::StartRefCountFromOneTag \
+ kRefCountPreference = ::base::subtle::kStartRefCountFromOneTag
+
+template <class T, typename Traits>
+class RefCounted;
+
+template <typename T>
+struct DefaultRefCountedTraits {
+ static void Destruct(const T* x) {
+ RefCounted<T, DefaultRefCountedTraits>::DeleteInternal(x);
+ }
+};
+
+template <class T, typename Traits = DefaultRefCountedTraits<T>>
+class RefCounted : public subtle::RefCountedBase {
+ public:
+ static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+ subtle::kStartRefCountFromZeroTag;
+
+ RefCounted() : subtle::RefCountedBase(T::kRefCountPreference) {}
+
+ void AddRef() const {
+ subtle::RefCountedBase::AddRef();
+ }
+
+ void Release() const {
+ if (subtle::RefCountedBase::Release()) {
+ // Prune the code paths which the static analyzer may take to simulate
+ // object destruction. Use-after-free errors aren't possible given the
+ // lifetime guarantees of the refcounting system.
+ ANALYZER_SKIP_THIS_PATH();
+
+ Traits::Destruct(static_cast<const T*>(this));
+ }
+ }
+
+ protected:
+ ~RefCounted() = default;
+
+ private:
+ friend struct DefaultRefCountedTraits<T>;
+ template <typename U>
+ static void DeleteInternal(const U* x) {
+ delete x;
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(RefCounted);
+};
+
+// Forward declaration.
+template <class T, typename Traits> class RefCountedThreadSafe;
+
+// Default traits for RefCountedThreadSafe<T>. Deletes the object when its ref
+// count reaches 0. Overload to delete it on a different thread etc.
+template<typename T>
+struct DefaultRefCountedThreadSafeTraits {
+ static void Destruct(const T* x) {
+ // Delete through RefCountedThreadSafe to make child classes only need to be
+ // friend with RefCountedThreadSafe instead of this struct, which is an
+ // implementation detail.
+ RefCountedThreadSafe<T,
+ DefaultRefCountedThreadSafeTraits>::DeleteInternal(x);
+ }
+};
+
+//
+// A thread-safe variant of RefCounted<T>
+//
+// class MyFoo : public base::RefCountedThreadSafe<MyFoo> {
+// ...
+// };
+//
+// If you're using the default trait, then you should add compile time
+// asserts that no one else is deleting your object. i.e.
+// private:
+// friend class base::RefCountedThreadSafe<MyFoo>;
+// ~MyFoo();
+//
+// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
+// too. See the comment above the RefCounted definition for details.
+template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
+class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
+ public:
+ static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+ subtle::kStartRefCountFromZeroTag;
+
+ explicit RefCountedThreadSafe()
+ : subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
+
+ void AddRef() const { AddRefImpl(T::kRefCountPreference); }
+
+ void Release() const {
+ if (subtle::RefCountedThreadSafeBase::Release()) {
+ ANALYZER_SKIP_THIS_PATH();
+ Traits::Destruct(static_cast<const T*>(this));
+ }
+ }
+
+ protected:
+ ~RefCountedThreadSafe() = default;
+
+ private:
+ friend struct DefaultRefCountedThreadSafeTraits<T>;
+ template <typename U>
+ static void DeleteInternal(const U* x) {
+ delete x;
+ }
+
+ void AddRefImpl(subtle::StartRefCountFromZeroTag) const {
+ subtle::RefCountedThreadSafeBase::AddRef();
+ }
+
+ void AddRefImpl(subtle::StartRefCountFromOneTag) const {
+ subtle::RefCountedThreadSafeBase::AddRefWithCheck();
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafe);
+};
+
+//
+// A thread-safe wrapper for some piece of data so we can place other
+// things in scoped_refptrs<>.
+//
+template<typename T>
+class RefCountedData
+ : public base::RefCountedThreadSafe< base::RefCountedData<T> > {
+ public:
+ RefCountedData() : data() {}
+ RefCountedData(const T& in_value) : data(in_value) {}
+ RefCountedData(T&& in_value) : data(std::move(in_value)) {}
+
+ T data;
+
+ private:
+ friend class base::RefCountedThreadSafe<base::RefCountedData<T> >;
+ ~RefCountedData() = default;
+};
+
+template <typename T>
+bool operator==(const RefCountedData<T>& lhs, const RefCountedData<T>& rhs) {
+ return lhs.data == rhs.data;
+}
+
+template <typename T>
+bool operator!=(const RefCountedData<T>& lhs, const RefCountedData<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+} // namespace base
+
+#endif // BASE_MEMORY_REF_COUNTED_H_
diff --git a/security/sandbox/chromium/base/memory/scoped_refptr.h b/security/sandbox/chromium/base/memory/scoped_refptr.h
new file mode 100644
index 0000000000..238b61a736
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/scoped_refptr.h
@@ -0,0 +1,375 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SCOPED_REFPTR_H_
+#define BASE_MEMORY_SCOPED_REFPTR_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <type_traits>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+template <class T>
+class scoped_refptr;
+
+namespace base {
+
+template <class, typename>
+class RefCounted;
+template <class, typename>
+class RefCountedThreadSafe;
+class SequencedTaskRunner;
+class WrappedPromise;
+
+template <typename T>
+scoped_refptr<T> AdoptRef(T* t);
+
+namespace internal {
+
+class BasePromise;
+
+} // namespace internal
+
+namespace subtle {
+
+enum AdoptRefTag { kAdoptRefTag };
+enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
+enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(const T*,
+ const RefCounted<U, V>*) {
+ return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
+ std::decay_t<decltype(U::kRefCountPreference)>>::value;
+}
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(
+ const T*,
+ const RefCountedThreadSafe<U, V>*) {
+ return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
+ std::decay_t<decltype(U::kRefCountPreference)>>::value;
+}
+
+constexpr bool IsRefCountPreferenceOverridden(...) {
+ return false;
+}
+
+} // namespace subtle
+
+// Creates a scoped_refptr from a raw pointer without incrementing the reference
+// count. Use this only for a newly created object whose reference count starts
+// from 1 instead of 0.
+template <typename T>
+scoped_refptr<T> AdoptRef(T* obj) {
+ using Tag = std::decay_t<decltype(T::kRefCountPreference)>;
+ static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
+ "Use AdoptRef only if the reference count starts from one.");
+
+ DCHECK(obj);
+ DCHECK(obj->HasOneRef());
+ obj->Adopted();
+ return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
+}
+
+namespace subtle {
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
+ return scoped_refptr<T>(obj);
+}
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
+ return AdoptRef(obj);
+}
+
+} // namespace subtle
+
+// Constructs an instance of T, which is a ref counted type, and wraps the
+// object into a scoped_refptr<T>.
+template <typename T, typename... Args>
+scoped_refptr<T> MakeRefCounted(Args&&... args) {
+ T* obj = new T(std::forward<Args>(args)...);
+ return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
+}
+
+// Takes an instance of T, which is a ref counted type, and wraps the object
+// into a scoped_refptr<T>.
+template <typename T>
+scoped_refptr<T> WrapRefCounted(T* t) {
+ return scoped_refptr<T>(t);
+}
+
+} // namespace base
+
+//
+// A smart pointer class for reference counted objects. Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference. Sample usage:
+//
+// class MyFoo : public RefCounted<MyFoo> {
+// ...
+// private:
+// friend class RefCounted<MyFoo>; // Allow destruction by RefCounted<>.
+// ~MyFoo(); // Destructor must be private/protected.
+// };
+//
+// void some_function() {
+// scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+// foo->Method(param);
+// // |foo| is released when this function returns
+// }
+//
+// void some_other_function() {
+// scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+// ...
+// foo.reset(); // explicitly releases |foo|
+// ...
+// if (foo)
+// foo->Method(param);
+// }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+// {
+// scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+// scoped_refptr<MyFoo> b;
+//
+// b.swap(a);
+// // now, |b| references the MyFoo object, and |a| references nullptr.
+// }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+// {
+// scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+// scoped_refptr<MyFoo> b;
+//
+// b = a;
+// // now, |a| and |b| each own a reference to the same MyFoo object.
+// }
+//
+// Also see Chromium's ownership and calling conventions:
+// https://chromium.googlesource.com/chromium/src/+/lkgr/styleguide/c++/c++.md#object-ownership-and-calling-conventions
+// Specifically:
+// If the function (at least sometimes) takes a ref on a refcounted object,
+// declare the param as scoped_refptr<T>. The caller can decide whether it
+// wishes to transfer ownership (by calling std::move(t) when passing t) or
+// retain its ref (by simply passing t directly).
+// In other words, use scoped_refptr like you would a std::unique_ptr except
+// in the odd case where it's required to hold on to a ref while handing one
+// to another component (if a component merely needs to use t on the stack
+// without keeping a ref: pass t as a raw T*).
+template <class T>
+class scoped_refptr {
+ public:
+ typedef T element_type;
+
+ constexpr scoped_refptr() = default;
+
+ // Allow implicit construction from nullptr.
+ constexpr scoped_refptr(std::nullptr_t) {}
+
+ // Constructs from a raw pointer. Note that this constructor allows implicit
+ // conversion from T* to scoped_refptr<T> which is strongly discouraged. If
+ // you are creating a new ref-counted object please use
+ // base::MakeRefCounted<T>() or base::WrapRefCounted<T>(). Otherwise you
+ // should move or copy construct from an existing scoped_refptr<T> to the
+ // ref-counted object.
+ scoped_refptr(T* p) : ptr_(p) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+
+ // Copy constructor. This is required in addition to the copy conversion
+ // constructor below.
+ scoped_refptr(const scoped_refptr& r) : scoped_refptr(r.ptr_) {}
+
+ // Copy conversion constructor.
+ template <typename U,
+ typename = typename std::enable_if<
+ std::is_convertible<U*, T*>::value>::type>
+ scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
+
+ // Move constructor. This is required in addition to the move conversion
+ // constructor below.
+ scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.ptr_) { r.ptr_ = nullptr; }
+
+ // Move conversion constructor.
+ template <typename U,
+ typename = typename std::enable_if<
+ std::is_convertible<U*, T*>::value>::type>
+ scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
+ r.ptr_ = nullptr;
+ }
+
+ ~scoped_refptr() {
+ static_assert(!base::subtle::IsRefCountPreferenceOverridden(
+ static_cast<T*>(nullptr), static_cast<T*>(nullptr)),
+ "It's unsafe to override the ref count preference."
+ " Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE"
+ " from subclasses.");
+ if (ptr_)
+ Release(ptr_);
+ }
+
+ T* get() const { return ptr_; }
+
+ T& operator*() const {
+ DCHECK(ptr_);
+ return *ptr_;
+ }
+
+ T* operator->() const {
+ DCHECK(ptr_);
+ return ptr_;
+ }
+
+ scoped_refptr& operator=(std::nullptr_t) {
+ reset();
+ return *this;
+ }
+
+ scoped_refptr& operator=(T* p) { return *this = scoped_refptr(p); }
+
+ // Unified assignment operator.
+ scoped_refptr& operator=(scoped_refptr r) noexcept {
+ swap(r);
+ return *this;
+ }
+
+ // Sets managed object to null and releases reference to the previous managed
+ // object, if it existed.
+ void reset() { scoped_refptr().swap(*this); }
+
+ void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
+
+ explicit operator bool() const { return ptr_ != nullptr; }
+
+ template <typename U>
+ bool operator==(const scoped_refptr<U>& rhs) const {
+ return ptr_ == rhs.get();
+ }
+
+ template <typename U>
+ bool operator!=(const scoped_refptr<U>& rhs) const {
+ return !operator==(rhs);
+ }
+
+ template <typename U>
+ bool operator<(const scoped_refptr<U>& rhs) const {
+ return ptr_ < rhs.get();
+ }
+
+ protected:
+ T* ptr_ = nullptr;
+
+ private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+ friend class ::base::SequencedTaskRunner;
+
+ // Friend access so these classes can use the constructor below as part of a
+ // binary size optimization.
+ friend class ::base::internal::BasePromise;
+ friend class ::base::WrappedPromise;
+
+ // Returns the owned pointer (if any), releasing ownership to the caller. The
+ // caller is responsible for managing the lifetime of the reference.
+ T* release();
+
+ scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
+
+ // Friend required for move constructors that set r.ptr_ to null.
+ template <typename U>
+ friend class scoped_refptr;
+
+ // Non-inline helpers to allow:
+ // class Opaque;
+ // extern template class scoped_refptr<Opaque>;
+ // Otherwise the compiler will complain that Opaque is an incomplete type.
+ static void AddRef(T* ptr);
+ static void Release(T* ptr);
+};
+
+template <typename T>
+T* scoped_refptr<T>::release() {
+ T* ptr = ptr_;
+ ptr_ = nullptr;
+ return ptr;
+}
+
+// static
+template <typename T>
+void scoped_refptr<T>::AddRef(T* ptr) {
+ ptr->AddRef();
+}
+
+// static
+template <typename T>
+void scoped_refptr<T>::Release(T* ptr) {
+ ptr->Release();
+}
+
+template <typename T, typename U>
+bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
+ return lhs.get() == rhs;
+}
+
+template <typename T, typename U>
+bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
+ return lhs == rhs.get();
+}
+
+template <typename T>
+bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+ return !static_cast<bool>(lhs);
+}
+
+template <typename T>
+bool operator==(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+ return !static_cast<bool>(rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T>
+bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+ return !operator==(lhs, null);
+}
+
+template <typename T>
+bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+ return !operator==(null, rhs);
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
+ return out << p.get();
+}
+
+template <typename T>
+void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) noexcept {
+ lhs.swap(rhs);
+}
+
+#endif // BASE_MEMORY_SCOPED_REFPTR_H_
diff --git a/security/sandbox/chromium/base/memory/shared_memory_mapping.cc b/security/sandbox/chromium/base/memory/shared_memory_mapping.cc
new file mode 100644
index 0000000000..8426fa8c21
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/shared_memory_mapping.cc
@@ -0,0 +1,115 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_mapping.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <sys/mman.h>
+#endif
+
+#if defined(OS_WIN)
+#include <aclapi.h>
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach_vm.h>
+#include "base/mac/mach_logging.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <lib/zx/vmar.h>
+#include "base/fuchsia/fuchsia_logging.h"
+#endif
+
+namespace base {
+
+SharedMemoryMapping::SharedMemoryMapping() = default;
+
+SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping) noexcept
+ : memory_(mapping.memory_),
+ size_(mapping.size_),
+ mapped_size_(mapping.mapped_size_),
+ guid_(mapping.guid_) {
+ mapping.memory_ = nullptr;
+}
+
+SharedMemoryMapping& SharedMemoryMapping::operator=(
+ SharedMemoryMapping&& mapping) noexcept {
+ Unmap();
+ memory_ = mapping.memory_;
+ size_ = mapping.size_;
+ mapped_size_ = mapping.mapped_size_;
+ guid_ = mapping.guid_;
+ mapping.memory_ = nullptr;
+ return *this;
+}
+
+SharedMemoryMapping::~SharedMemoryMapping() {
+ Unmap();
+}
+
+SharedMemoryMapping::SharedMemoryMapping(void* memory,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid)
+ : memory_(memory), size_(size), mapped_size_(mapped_size), guid_(guid) {
+ SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+}
+
+void SharedMemoryMapping::Unmap() {
+ if (!IsValid())
+ return;
+
+ SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+#if defined(OS_WIN)
+ if (!UnmapViewOfFile(memory_))
+ DPLOG(ERROR) << "UnmapViewOfFile";
+#elif defined(OS_FUCHSIA)
+ uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
+ zx_status_t status = zx::vmar::root_self()->unmap(addr, mapped_size_);
+ if (status != ZX_OK)
+ ZX_DLOG(ERROR, status) << "zx_vmar_unmap";
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ kern_return_t kr = mach_vm_deallocate(
+ mach_task_self(), reinterpret_cast<mach_vm_address_t>(memory_),
+ mapped_size_);
+ MACH_DLOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "mach_vm_deallocate";
+#else
+ if (munmap(memory_, mapped_size_) < 0)
+ DPLOG(ERROR) << "munmap";
+#endif
+}
+
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping() = default;
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
+ ReadOnlySharedMemoryMapping&&) noexcept = default;
+ReadOnlySharedMemoryMapping& ReadOnlySharedMemoryMapping::operator=(
+ ReadOnlySharedMemoryMapping&&) noexcept = default;
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
+ void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid)
+ : SharedMemoryMapping(address, size, mapped_size, guid) {}
+
+WritableSharedMemoryMapping::WritableSharedMemoryMapping() = default;
+WritableSharedMemoryMapping::WritableSharedMemoryMapping(
+ WritableSharedMemoryMapping&&) noexcept = default;
+WritableSharedMemoryMapping& WritableSharedMemoryMapping::operator=(
+ WritableSharedMemoryMapping&&) noexcept = default;
+WritableSharedMemoryMapping::WritableSharedMemoryMapping(
+ void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid)
+ : SharedMemoryMapping(address, size, mapped_size, guid) {}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/memory/shared_memory_mapping.h b/security/sandbox/chromium/base/memory/shared_memory_mapping.h
new file mode 100644
index 0000000000..2b8858e166
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/shared_memory_mapping.h
@@ -0,0 +1,252 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
+#define BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
+
+#include <cstddef>
+#include <type_traits>
+
+#include "base/containers/buffer_iterator.h"
+#include "base/containers/span.h"
+#include "base/macros.h"
+#include "base/unguessable_token.h"
+
+namespace base {
+
+namespace subtle {
+class PlatformSharedMemoryRegion;
+} // namespace subtle
+
+// Base class for scoped handles to a shared memory mapping created from a
+// shared memory region. Created shared memory mappings remain valid even if the
+// creator region is transferred or destroyed.
+//
+// Each mapping has an UnguessableToken that identifies the shared memory region
+// it was created from. This is used for memory metrics, to avoid overcounting
+// shared memory.
+class BASE_EXPORT SharedMemoryMapping {
+ public:
+ // Default constructor initializes an invalid instance.
+ SharedMemoryMapping();
+
+ // Move operations are allowed.
+ SharedMemoryMapping(SharedMemoryMapping&& mapping) noexcept;
+ SharedMemoryMapping& operator=(SharedMemoryMapping&& mapping) noexcept;
+
+ // Unmaps the region if the mapping is valid.
+ virtual ~SharedMemoryMapping();
+
+ // Returns true iff the mapping is valid. False means there is no
+ // corresponding area of memory.
+ bool IsValid() const { return memory_ != nullptr; }
+
+ // Returns the logical size of the mapping in bytes. This is precisely the
+ // size requested by whoever created the mapping, and it is always less than
+ // or equal to |mapped_size()|. This is undefined for invalid instances.
+ size_t size() const {
+ DCHECK(IsValid());
+ return size_;
+ }
+
+ // Returns the actual size of the mapping in bytes. This is always at least
+ // as large as |size()| but may be larger due to platform mapping alignment
+ // constraints. This is undefined for invalid instances.
+ size_t mapped_size() const {
+ DCHECK(IsValid());
+ return mapped_size_;
+ }
+
+ // Returns 128-bit GUID of the region this mapping belongs to.
+ const UnguessableToken& guid() const {
+ DCHECK(IsValid());
+ return guid_;
+ }
+
+ protected:
+ SharedMemoryMapping(void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid);
+ void* raw_memory_ptr() const { return memory_; }
+
+ private:
+ friend class SharedMemoryTracker;
+
+ void Unmap();
+
+ void* memory_ = nullptr;
+ size_t size_ = 0;
+ size_t mapped_size_ = 0;
+ UnguessableToken guid_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedMemoryMapping);
+};
+
+// Class modeling a read-only mapping of a shared memory region into the
+// current process' address space. This is created by ReadOnlySharedMemoryRegion
+// instances.
+class BASE_EXPORT ReadOnlySharedMemoryMapping : public SharedMemoryMapping {
+ public:
+ // Default constructor initializes an invalid instance.
+ ReadOnlySharedMemoryMapping();
+
+ // Move operations are allowed.
+ ReadOnlySharedMemoryMapping(ReadOnlySharedMemoryMapping&&) noexcept;
+ ReadOnlySharedMemoryMapping& operator=(
+ ReadOnlySharedMemoryMapping&&) noexcept;
+
+ // Returns the base address of the mapping. This is read-only memory. This is
+ // page-aligned. This is nullptr for invalid instances.
+ const void* memory() const { return raw_memory_ptr(); }
+
+ // Returns a pointer to a page-aligned const T if the mapping is valid and
+ // large enough to contain a T, or nullptr otherwise.
+ template <typename T>
+ const T* GetMemoryAs() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return nullptr;
+ if (sizeof(T) > size())
+ return nullptr;
+ return static_cast<const T*>(raw_memory_ptr());
+ }
+
+ // Returns a span of const T. The number of elements is autodeduced from the
+ // size of the shared memory mapping. The number of elements may be
+ // autodeduced as zero, i.e. the mapping is invalid or the size of the mapping
+ // isn't large enough to contain even one T: in that case, an empty span
+ // will be returned. The first element, if any, is guaranteed to be
+ // page-aligned.
+ template <typename T>
+ span<const T> GetMemoryAsSpan() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return span<const T>();
+ size_t count = size() / sizeof(T);
+ return GetMemoryAsSpan<T>(count);
+ }
+
+ // Returns a span of const T with |count| elements if the mapping is valid and
+ // large enough to contain |count| elements, or an empty span otherwise. The
+ // first element, if any, is guaranteed to be page-aligned.
+ template <typename T>
+ span<const T> GetMemoryAsSpan(size_t count) const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return span<const T>();
+ if (size() / sizeof(T) < count)
+ return span<const T>();
+ return span<const T>(static_cast<const T*>(raw_memory_ptr()), count);
+ }
+
+ // Returns a BufferIterator of const T.
+ template <typename T>
+ BufferIterator<const T> GetMemoryAsBufferIterator() const {
+ return BufferIterator<const T>(GetMemoryAsSpan<T>());
+ }
+
+ private:
+ friend class ReadOnlySharedMemoryRegion;
+ ReadOnlySharedMemoryMapping(void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid);
+
+ DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryMapping);
+};
+
+// Class modeling a writable mapping of a shared memory region into the
+// current process' address space. This is created by *SharedMemoryRegion
+// instances.
+class BASE_EXPORT WritableSharedMemoryMapping : public SharedMemoryMapping {
+ public:
+ // Default constructor initializes an invalid instance.
+ WritableSharedMemoryMapping();
+
+ // Move operations are allowed.
+ WritableSharedMemoryMapping(WritableSharedMemoryMapping&&) noexcept;
+ WritableSharedMemoryMapping& operator=(
+ WritableSharedMemoryMapping&&) noexcept;
+
+ // Returns the base address of the mapping. This is writable memory. This is
+ // page-aligned. This is nullptr for invalid instances.
+ void* memory() const { return raw_memory_ptr(); }
+
+ // Returns a pointer to a page-aligned T if the mapping is valid and large
+ // enough to contain a T, or nullptr otherwise.
+ template <typename T>
+ T* GetMemoryAs() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return nullptr;
+ if (sizeof(T) > size())
+ return nullptr;
+ return static_cast<T*>(raw_memory_ptr());
+ }
+
+ // Returns a span of T. The number of elements is autodeduced from the size of
+ // the shared memory mapping. The number of elements may be autodeduced as
+ // zero, i.e. the mapping is invalid or the size of the mapping isn't large
+ // enough to contain even one T: in that case, an empty span will be returned.
+ // The first element, if any, is guaranteed to be page-aligned.
+ template <typename T>
+ span<T> GetMemoryAsSpan() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return span<T>();
+ size_t count = size() / sizeof(T);
+ return GetMemoryAsSpan<T>(count);
+ }
+
+ // Returns a span of T with |count| elements if the mapping is valid and large
+ // enough to contain |count| elements, or an empty span otherwise. The first
+ // element, if any, is guaranteed to be page-aligned.
+ template <typename T>
+ span<T> GetMemoryAsSpan(size_t count) const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return span<T>();
+ if (size() / sizeof(T) < count)
+ return span<T>();
+ return span<T>(static_cast<T*>(raw_memory_ptr()), count);
+ }
+
+ // Returns a BufferIterator of T.
+ template <typename T>
+ BufferIterator<T> GetMemoryAsBufferIterator() {
+ return BufferIterator<T>(GetMemoryAsSpan<T>());
+ }
+
+ private:
+ friend WritableSharedMemoryMapping MapAtForTesting(
+ subtle::PlatformSharedMemoryRegion* region,
+ off_t offset,
+ size_t size);
+ friend class ReadOnlySharedMemoryRegion;
+ friend class WritableSharedMemoryRegion;
+ friend class UnsafeSharedMemoryRegion;
+ WritableSharedMemoryMapping(void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid);
+
+ DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryMapping);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
diff --git a/security/sandbox/chromium/base/memory/singleton.h b/security/sandbox/chromium/base/memory/singleton.h
new file mode 100644
index 0000000000..87b57919c0
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/singleton.h
@@ -0,0 +1,279 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// PLEASE READ: Do you really need a singleton? If possible, use a
+// function-local static of type base::NoDestructor<T> instead:
+//
+// Factory& Factory::GetInstance() {
+// static base::NoDestructor<Factory> instance;
+// return *instance;
+// }
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+//
+// Singletons make it hard to determine the lifetime of an object, which can
+// lead to buggy code and spurious crashes.
+//
+// Instead of adding another singleton into the mix, try to identify either:
+// a) An existing singleton that can manage your object's lifetime
+// b) Locations where you can deterministically create the object and pass
+// into other objects
+//
+// If you absolutely need a singleton, please keep them as trivial as possible
+// and ideally a leaf dependency. Singletons get problematic when they attempt
+// to do too much in their destructor or have circular dependencies.
+
+#ifndef BASE_MEMORY_SINGLETON_H_
+#define BASE_MEMORY_SINGLETON_H_
+
+#include "base/at_exit.h"
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/lazy_instance_helpers.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+// Default traits for Singleton<Type>. Calls operator new and operator delete on
+// the object. Registers automatic deletion at process exit.
+// Overload if you need arguments or another memory allocation function.
+template<typename Type>
+struct DefaultSingletonTraits {
+ // Allocates the object.
+ static Type* New() {
+ // The parentheses are very important here; they force POD type
+ // initialization.
+ return new Type();
+ }
+
+ // Destroys the object.
+ static void Delete(Type* x) {
+ delete x;
+ }
+
+ // Set to true to automatically register deletion of the object on process
+ // exit. See below for the required call that makes this happen.
+ static const bool kRegisterAtExit = true;
+
+#if DCHECK_IS_ON()
+ // Set to false to disallow access on a non-joinable thread. This is
+ // different from kRegisterAtExit because StaticMemorySingletonTraits allows
+ // access on non-joinable threads, and gracefully handles this.
+ static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
+};
+
+
+// Alternate traits for use with the Singleton<Type>. Identical to
+// DefaultSingletonTraits except that the Singleton will not be cleaned up
+// at exit.
+template<typename Type>
+struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
+ static const bool kRegisterAtExit = false;
+#if DCHECK_IS_ON()
+ static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+};
+
+// Alternate traits for use with the Singleton<Type>. Allocates memory
+// for the singleton instance from a static buffer. The singleton will
+// be cleaned up at exit, but can't be revived after destruction unless
+// the ResurrectForTesting() method is called.
+//
+// This is useful for a certain category of things, notably logging and
+// tracing, where the singleton instance is of a type carefully constructed to
+// be safe to access post-destruction.
+// In logging and tracing you'll typically get stray calls at odd times, like
+// during static destruction, thread teardown and the like, and there's a
+// termination race on the heap-based singleton - e.g. if one thread calls
+// get(), but then another thread initiates AtExit processing, the first thread
+// may call into an object residing in unallocated memory. If the instance is
+// allocated from the data segment, then this is survivable.
+//
+// The destructor is to deallocate system resources, in this case to unregister
+// a callback the system will invoke when logging levels change. Note that
+// this is also used in e.g. Chrome Frame, where you have to allow for the
+// possibility of loading briefly into someone else's process space, and
+// so leaking is not an option, as that would sabotage the state of your host
+// process once you've unloaded.
+template <typename Type>
+struct StaticMemorySingletonTraits {
+ // WARNING: User has to support a New() which returns null.
+ static Type* New() {
+ // Only constructs once and returns pointer; otherwise returns null.
+ if (subtle::NoBarrier_AtomicExchange(&dead_, 1))
+ return nullptr;
+
+ return new (buffer_) Type();
+ }
+
+ static void Delete(Type* p) {
+ if (p)
+ p->Type::~Type();
+ }
+
+ static const bool kRegisterAtExit = true;
+
+#if DCHECK_IS_ON()
+ static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+
+ static void ResurrectForTesting() { subtle::NoBarrier_Store(&dead_, 0); }
+
+ private:
+ alignas(Type) static char buffer_[sizeof(Type)];
+ // Signal the object was already deleted, so it is not revived.
+ static subtle::Atomic32 dead_;
+};
+
+template <typename Type>
+alignas(Type) char StaticMemorySingletonTraits<Type>::buffer_[sizeof(Type)];
+template <typename Type>
+subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
+
+// The Singleton<Type, Traits, DifferentiatingType> class manages a single
+// instance of Type which will be created on first use and will be destroyed at
+// normal process exit. The Traits::Delete function will not be called on
+// abnormal process exit.
+//
+// DifferentiatingType is used as a key to differentiate two different
+// singletons having the same memory allocation functions but serving a
+// different purpose. This is mainly used for Locks serving different purposes.
+//
+// Example usage:
+//
+// In your header:
+// namespace base {
+// template <typename T>
+// struct DefaultSingletonTraits;
+// }
+// class FooClass {
+// public:
+// static FooClass* GetInstance(); <-- See comment below on this.
+// void Bar() { ... }
+// private:
+// FooClass() { ... }
+// friend struct base::DefaultSingletonTraits<FooClass>;
+//
+// DISALLOW_COPY_AND_ASSIGN(FooClass);
+// };
+//
+// In your source file:
+// #include "base/memory/singleton.h"
+// FooClass* FooClass::GetInstance() {
+// return base::Singleton<FooClass>::get();
+// }
+//
+// Or for leaky singletons:
+// #include "base/memory/singleton.h"
+// FooClass* FooClass::GetInstance() {
+// return base::Singleton<
+// FooClass, base::LeakySingletonTraits<FooClass>>::get();
+// }
+//
+// And to call methods on FooClass:
+// FooClass::GetInstance()->Bar();
+//
+// NOTE: The method accessing Singleton<T>::get() has to be named as GetInstance
+// and it is important that FooClass::GetInstance() is not inlined in the
+// header. This makes sure that when source files from multiple targets include
+// this header they don't end up with different copies of the inlined code
+// creating multiple copies of the singleton.
+//
+// Singleton<> has no non-static members and doesn't need to actually be
+// instantiated.
+//
+// This class is itself thread-safe. The underlying Type must of course be
+// thread-safe if you want to use it concurrently. Two parameters may be tuned
+// depending on the user's requirements.
+//
+// Glossary:
+// RAE = kRegisterAtExit
+//
+// On every platform, if Traits::RAE is true, the singleton will be destroyed at
+// process exit. More precisely it uses AtExitManager which requires an
+// object of this type to be instantiated. AtExitManager mimics the semantics
+// of atexit() such as LIFO order but under Windows is safer to call. For more
+// information see at_exit.h.
+//
+// If Traits::RAE is false, the singleton will not be freed at process exit,
+// thus the singleton will be leaked if it is ever accessed. Traits::RAE
+// shouldn't be false unless absolutely necessary. Remember that the heap where
+// the object is allocated may be destroyed by the CRT anyway.
+//
+// Caveats:
+// (a) Every call to get(), operator->() and operator*() incurs some overhead
+// (16ns on my P4/2.8GHz) to check whether the object has already been
+// initialized. You may wish to cache the result of get(); it will not
+// change.
+//
+// (b) Your factory function must never throw an exception. This class is not
+// exception-safe.
+//
+
+template <typename Type,
+ typename Traits = DefaultSingletonTraits<Type>,
+ typename DifferentiatingType = Type>
+class Singleton {
+ private:
+ // A class T using the Singleton<T> pattern should declare a GetInstance()
+ // method and call Singleton::get() from within that. T may also declare a
+ // GetInstanceIfExists() method to invoke Singleton::GetIfExists().
+ friend Type;
+
+ // This class is safe to be constructed and copy-constructed since it has no
+ // member.
+
+ // Returns a pointer to the one true instance of the class.
+ static Type* get() {
+#if DCHECK_IS_ON()
+ if (!Traits::kAllowedToAccessOnNonjoinableThread)
+ ThreadRestrictions::AssertSingletonAllowed();
+#endif
+
+ return subtle::GetOrCreateLazyPointer(
+ &instance_, &CreatorFunc, nullptr,
+ Traits::kRegisterAtExit ? OnExit : nullptr, nullptr);
+ }
+
+ // Returns the same result as get() if the instance exists but doesn't
+ // construct it (and returns null) if it doesn't.
+ static Type* GetIfExists() {
+#if DCHECK_IS_ON()
+ if (!Traits::kAllowedToAccessOnNonjoinableThread)
+ ThreadRestrictions::AssertSingletonAllowed();
+#endif
+
+ if (!subtle::NoBarrier_Load(&instance_))
+ return nullptr;
+
+ // Need to invoke get() nonetheless as some Traits return null after
+ // destruction (even though |instance_| still holds garbage).
+ return get();
+ }
+
+ // Internal method used as an adaptor for GetOrCreateLazyPointer(). Do not use
+ // outside of that use case.
+ static Type* CreatorFunc(void* /* creator_arg*/) { return Traits::New(); }
+
+ // Adapter function for use with AtExit(). This should be called single
+ // threaded, so don't use atomic operations.
+ // Calling OnExit while singleton is in use by other threads is a mistake.
+ static void OnExit(void* /*unused*/) {
+ // AtExit should only ever be registered after the singleton instance was
+ // created. We should only ever get here with a valid instance_ pointer.
+ Traits::Delete(reinterpret_cast<Type*>(subtle::NoBarrier_Load(&instance_)));
+ instance_ = 0;
+ }
+ static subtle::AtomicWord instance_;
+};
+
+template <typename Type, typename Traits, typename DifferentiatingType>
+subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::instance_ = 0;
+
+} // namespace base
+
+#endif // BASE_MEMORY_SINGLETON_H_
diff --git a/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.cc b/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.cc
new file mode 100644
index 0000000000..92385d3e78
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.cc
@@ -0,0 +1,80 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/unsafe_shared_memory_region.h"
+
+#include <utility>
+
+namespace base {
+
+UnsafeSharedMemoryRegion::CreateFunction*
+ UnsafeSharedMemoryRegion::create_hook_ = nullptr;
+
+// static
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Create(size_t size) {
+ if (create_hook_)
+ return create_hook_(size);
+
+ subtle::PlatformSharedMemoryRegion handle =
+ subtle::PlatformSharedMemoryRegion::CreateUnsafe(size);
+
+ return UnsafeSharedMemoryRegion(std::move(handle));
+}
+
+// static
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Deserialize(
+ subtle::PlatformSharedMemoryRegion handle) {
+ return UnsafeSharedMemoryRegion(std::move(handle));
+}
+
+// static
+subtle::PlatformSharedMemoryRegion
+UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ UnsafeSharedMemoryRegion region) {
+ return std::move(region.handle_);
+}
+
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion() = default;
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
+ UnsafeSharedMemoryRegion&& region) = default;
+UnsafeSharedMemoryRegion& UnsafeSharedMemoryRegion::operator=(
+ UnsafeSharedMemoryRegion&& region) = default;
+UnsafeSharedMemoryRegion::~UnsafeSharedMemoryRegion() = default;
+
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Duplicate() const {
+ return UnsafeSharedMemoryRegion(handle_.Duplicate());
+}
+
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::Map() const {
+ return MapAt(0, handle_.GetSize());
+}
+
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::MapAt(off_t offset,
+ size_t size) const {
+ if (!IsValid())
+ return {};
+
+ void* memory = nullptr;
+ size_t mapped_size = 0;
+ if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+ return {};
+
+ return WritableSharedMemoryMapping(memory, size, mapped_size,
+ handle_.GetGUID());
+}
+
+bool UnsafeSharedMemoryRegion::IsValid() const {
+ return handle_.IsValid();
+}
+
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
+ subtle::PlatformSharedMemoryRegion handle)
+ : handle_(std::move(handle)) {
+ if (handle_.IsValid()) {
+ CHECK_EQ(handle_.GetMode(),
+ subtle::PlatformSharedMemoryRegion::Mode::kUnsafe);
+ }
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.h b/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.h
new file mode 100644
index 0000000000..559d4c6830
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.h
@@ -0,0 +1,127 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
+
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+
+// Scoped move-only handle to a region of platform shared memory. The instance
+// owns the platform handle it wraps. Mappings created by this region are
+// writable. These mappings remain valid even after the region handle is moved
+// or destroyed.
+//
+// NOTE: UnsafeSharedMemoryRegion cannot be converted to a read-only region. Use
+// with caution as the region will be writable to any process with a handle to
+// the region.
+//
+// Use this if and only if the following is true:
+// - You do not need to share the region as read-only, and,
+// - You need to have several instances of the region simultaneously, possibly
+// in different processes, that can produce writable mappings.
+
+class BASE_EXPORT UnsafeSharedMemoryRegion {
+ public:
+ using MappingType = WritableSharedMemoryMapping;
+ // Creates a new UnsafeSharedMemoryRegion instance of a given size that can be
+ // used for mapping writable shared memory into the virtual address space.
+ //
+ // This call will fail if the process does not have sufficient permissions to
+ // create a shared memory region itself. See
+ // mojo::CreateUnsafeSharedMemoryRegion in
+ // mojo/public/cpp/base/shared_memory_utils.h for creating a shared memory
+ // region from an unprivileged process where a broker must be used.
+ static UnsafeSharedMemoryRegion Create(size_t size);
+ using CreateFunction = decltype(Create);
+
+ // Returns an UnsafeSharedMemoryRegion built from a platform-specific handle
+ // that was taken from another UnsafeSharedMemoryRegion instance. Returns an
+ // invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
+ // isn't unsafe.
+ // This should be used only by the code passing a handle across
+ // process boundaries.
+ static UnsafeSharedMemoryRegion Deserialize(
+ subtle::PlatformSharedMemoryRegion handle);
+
+ // Extracts a platform handle from the region. Ownership is transferred to the
+ // returned region object.
+ // This should be used only for sending the handle from the current
+ // process to another.
+ static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
+ UnsafeSharedMemoryRegion region);
+
+ // Default constructor initializes an invalid instance.
+ UnsafeSharedMemoryRegion();
+
+ // Move operations are allowed.
+ UnsafeSharedMemoryRegion(UnsafeSharedMemoryRegion&&);
+ UnsafeSharedMemoryRegion& operator=(UnsafeSharedMemoryRegion&&);
+
+ // Destructor closes shared memory region if valid.
+ // All created mappings will remain valid.
+ ~UnsafeSharedMemoryRegion();
+
+ // Duplicates the underlying platform handle and creates a new
+ // UnsafeSharedMemoryRegion instance that owns the newly created handle.
+ // Returns a valid UnsafeSharedMemoryRegion on success, invalid otherwise.
+ // The current region instance remains valid in any case.
+ UnsafeSharedMemoryRegion Duplicate() const;
+
+ // Maps the shared memory region into the caller's address space with write
+ // access. The mapped address is guaranteed to have an alignment of
+ // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
+ // Returns a valid WritableSharedMemoryMapping instance on success, invalid
+ // otherwise.
+ WritableSharedMemoryMapping Map() const;
+
+ // Same as above, but maps only |size| bytes of the shared memory region
+ // starting with the given |offset|. |offset| must be aligned to value of
+ // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
+ // requested bytes are out of the region limits.
+ WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
+
+ // Whether the underlying platform handle is valid.
+ bool IsValid() const;
+
+ // Returns the maximum mapping size that can be created from this region.
+ size_t GetSize() const {
+ DCHECK(IsValid());
+ return handle_.GetSize();
+ }
+
+ // Returns 128-bit GUID of the region.
+ const UnguessableToken& GetGUID() const {
+ DCHECK(IsValid());
+ return handle_.GetGUID();
+ }
+
+ // Returns a platform shared memory handle. |this| remains the owner of the
+ // handle.
+ subtle::PlatformSharedMemoryRegion::PlatformHandle GetPlatformHandle() const {
+ DCHECK(IsValid());
+ return handle_.GetPlatformHandle();
+ }
+
+ private:
+ friend class SharedMemoryHooks;
+
+ explicit UnsafeSharedMemoryRegion(subtle::PlatformSharedMemoryRegion handle);
+
+ static void set_create_hook(CreateFunction* hook) { create_hook_ = hook; }
+
+ static CreateFunction* create_hook_;
+
+ subtle::PlatformSharedMemoryRegion handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(UnsafeSharedMemoryRegion);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
diff --git a/security/sandbox/chromium/base/memory/weak_ptr.h b/security/sandbox/chromium/base/memory/weak_ptr.h
new file mode 100644
index 0000000000..d274987168
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/weak_ptr.h
@@ -0,0 +1,395 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Weak pointers are pointers to an object that do not affect its lifetime,
+// and which may be invalidated (i.e. reset to nullptr) by the object, or its
+// owner, at any time, most commonly when the object is about to be deleted.
+
+// Weak pointers are useful when an object needs to be accessed safely by one
+// or more objects other than its owner, and those callers can cope with the
+// object vanishing and e.g. tasks posted to it being silently dropped.
+// Reference-counting such an object would complicate the ownership graph and
+// make it harder to reason about the object's lifetime.
+
+// EXAMPLE:
+//
+// class Controller {
+// public:
+// void SpawnWorker() { Worker::StartNew(weak_factory_.GetWeakPtr()); }
+// void WorkComplete(const Result& result) { ... }
+// private:
+// // Member variables should appear before the WeakPtrFactory, to ensure
+//   // that any WeakPtrs to Controller are invalidated before its member
+//   // variables' destructors are executed, rendering them invalid.
+// WeakPtrFactory<Controller> weak_factory_{this};
+// };
+//
+// class Worker {
+// public:
+// static void StartNew(const WeakPtr<Controller>& controller) {
+// Worker* worker = new Worker(controller);
+// // Kick off asynchronous processing...
+// }
+// private:
+// Worker(const WeakPtr<Controller>& controller)
+// : controller_(controller) {}
+// void DidCompleteAsynchronousProcessing(const Result& result) {
+// if (controller_)
+// controller_->WorkComplete(result);
+// }
+// WeakPtr<Controller> controller_;
+// };
+//
+// With this implementation a caller may use SpawnWorker() to dispatch multiple
+// Workers and subsequently delete the Controller, without waiting for all
+// Workers to have completed.
+
+// ------------------------- IMPORTANT: Thread-safety -------------------------
+
+// Weak pointers may be passed safely between sequences, but must always be
+// dereferenced and invalidated on the same SequencedTaskRunner, otherwise
+// checking the pointer would be racy.
+//
+// To ensure correct use, the first time a WeakPtr issued by a WeakPtrFactory
+// is dereferenced, the factory and its WeakPtrs become bound to the calling
+// sequence or current SequencedWorkerPool token, and cannot be dereferenced or
+// invalidated on any other task runner. Bound WeakPtrs can still be handed
+// off to other task runners, e.g. to use to post tasks back to object on the
+// bound sequence.
+//
+// If all WeakPtr objects are destroyed or invalidated then the factory is
+// unbound from the SequencedTaskRunner/Thread. The WeakPtrFactory may then be
+// destroyed, or new WeakPtr objects may be used, from a different sequence.
+//
+// Thus, at least one WeakPtr object must exist and have been dereferenced on
+// the correct sequence before the other WeakPtr objects can enforce that they
+// are used on the desired sequence.
+
+#ifndef BASE_MEMORY_WEAK_PTR_H_
+#define BASE_MEMORY_WEAK_PTR_H_
+
+#include <cstddef>
+#include <type_traits>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequence_checker.h"
+#include "base/synchronization/atomic_flag.h"
+
+namespace base {
+
+template <typename T> class SupportsWeakPtr;
+template <typename T> class WeakPtr;
+
+namespace internal {
+// These classes are part of the WeakPtr implementation.
+// DO NOT USE THESE CLASSES DIRECTLY YOURSELF.
+
+// A reference to an object's validity flag. Each WeakPtr holds one of these;
+// copies share the same underlying Flag via ref-counting. Method definitions
+// live in the corresponding .cc file.
+class BASE_EXPORT WeakReference {
+ public:
+  // Although Flag is bound to a specific SequencedTaskRunner, it may be
+  // deleted from another via base::WeakPtr::~WeakPtr().
+  class BASE_EXPORT Flag : public RefCountedThreadSafe<Flag> {
+   public:
+    Flag();
+
+    // Marks the flag (and hence every WeakPtr sharing it) as invalid.
+    void Invalidate();
+    // Whether the referenced object is still alive; see the file-level
+    // comment for the sequence restrictions on this check.
+    bool IsValid() const;
+
+    // Like IsValid(), but callable from any sequence; may report a stale
+    // "true" (see WeakPtr::MaybeValid()).
+    bool MaybeValid() const;
+
+    // Unbinds the flag from the sequence it was bound to.
+    void DetachFromSequence();
+
+   private:
+    friend class base::RefCountedThreadSafe<Flag>;
+
+    ~Flag();
+
+    SEQUENCE_CHECKER(sequence_checker_);
+    // Flipped by Invalidate(); AtomicFlag makes MaybeValid() safe cross-thread.
+    AtomicFlag invalidated_;
+  };
+
+  WeakReference();
+  explicit WeakReference(const scoped_refptr<Flag>& flag);
+  ~WeakReference();
+
+  WeakReference(WeakReference&& other) noexcept;
+  WeakReference(const WeakReference& other);
+  WeakReference& operator=(WeakReference&& other) noexcept = default;
+  WeakReference& operator=(const WeakReference& other) = default;
+
+  bool IsValid() const;
+  bool MaybeValid() const;
+
+ private:
+  scoped_refptr<const Flag> flag_;
+};
+
+// Held by the object (via WeakPtrFactory/SupportsWeakPtr); hands out
+// WeakReferences that all share a single validity Flag.
+class BASE_EXPORT WeakReferenceOwner {
+ public:
+  WeakReferenceOwner();
+  ~WeakReferenceOwner();
+
+  // Returns a new WeakReference sharing flag_.
+  WeakReference GetRef() const;
+
+  // True if any outstanding WeakReference still shares flag_: this owner
+  // holds one ref itself, so more than one ref implies live WeakPtrs.
+  bool HasRefs() const { return !flag_->HasOneRef(); }
+
+  // Invalidates every WeakReference handed out so far.
+  void Invalidate();
+
+ private:
+  scoped_refptr<WeakReference::Flag> flag_;
+};
+
+// This class simplifies the implementation of WeakPtr's type conversion
+// constructor by avoiding the need for a public accessor for ref_. A
+// WeakPtr<T> cannot access the private members of WeakPtr<U>, so this
+// base class gives us a way to access ref_ in a protected fashion.
+class BASE_EXPORT WeakPtrBase {
+ public:
+  WeakPtrBase();
+  ~WeakPtrBase();
+
+  WeakPtrBase(const WeakPtrBase& other) = default;
+  WeakPtrBase(WeakPtrBase&& other) noexcept = default;
+  WeakPtrBase& operator=(const WeakPtrBase& other) = default;
+  WeakPtrBase& operator=(WeakPtrBase&& other) noexcept = default;
+
+  // Drops the reference and clears the stored pointer, returning this
+  // object to the default (null) state.
+  void reset() {
+    ref_ = internal::WeakReference();
+    ptr_ = 0;
+  }
+
+ protected:
+  WeakPtrBase(const WeakReference& ref, uintptr_t ptr);
+
+  WeakReference ref_;
+
+  // This pointer is only valid when ref_.is_valid() is true. Otherwise, its
+  // value is undefined (as opposed to nullptr). Stored as uintptr_t so the
+  // non-template base needs no knowledge of T.
+  uintptr_t ptr_;
+};
+
+// This class provides a common implementation of common functions that would
+// otherwise get instantiated separately for each distinct instantiation of
+// SupportsWeakPtr<>.
+class SupportsWeakPtrBase {
+ public:
+  // A safe static downcast of a WeakPtr<Base> to WeakPtr<Derived>. This
+  // conversion will only compile if there exists a Base which inherits
+  // from SupportsWeakPtr<Base>. See base::AsWeakPtr() below for a helper
+  // function that makes calling this easier.
+  //
+  // Precondition: t != nullptr
+  template<typename Derived>
+  static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
+    static_assert(
+        std::is_base_of<internal::SupportsWeakPtrBase, Derived>::value,
+        "AsWeakPtr argument must inherit from SupportsWeakPtr");
+    return AsWeakPtrImpl<Derived>(t);
+  }
+
+ private:
+  // This template function uses type inference to find a Base of Derived
+  // which is an instance of SupportsWeakPtr<Base>. We can then safely
+  // static_cast the Base* to a Derived*.
+  template <typename Derived, typename Base>
+  static WeakPtr<Derived> AsWeakPtrImpl(SupportsWeakPtr<Base>* t) {
+    WeakPtr<Base> ptr = t->AsWeakPtr();
+    // ptr_ holds the address as uintptr_t: recover the Base* first so the
+    // static_cast applies any multiple-inheritance pointer adjustment.
+    return WeakPtr<Derived>(
+        ptr.ref_, static_cast<Derived*>(reinterpret_cast<Base*>(ptr.ptr_)));
+  }
+};
+
+} // namespace internal
+
+template <typename T> class WeakPtrFactory;
+
+// The WeakPtr class holds a weak reference to |T*|.
+//
+// This class is designed to be used like a normal pointer. You should always
+// null-test an object of this class before using it or invoking a method that
+// may result in the underlying object being destroyed.
+//
+// EXAMPLE:
+//
+// class Foo { ... };
+// WeakPtr<Foo> foo;
+// if (foo)
+// foo->method();
+//
+template <typename T>
+class WeakPtr : public internal::WeakPtrBase {
+ public:
+  WeakPtr() = default;
+  // Implicit construction from nullptr yields a null WeakPtr.
+  WeakPtr(std::nullptr_t) {}
+
+  // Allow conversion from U to T provided U "is a" T. Note that this
+  // is separate from the (implicit) copy and move constructors.
+  template <typename U>
+  WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other) {
+    // Need to cast from U* to T* to do pointer adjustment in case of multiple
+    // inheritance. This also enforces the "U is a T" rule.
+    T* t = reinterpret_cast<U*>(other.ptr_);
+    ptr_ = reinterpret_cast<uintptr_t>(t);
+  }
+  template <typename U>
+  WeakPtr(WeakPtr<U>&& other) noexcept : WeakPtrBase(std::move(other)) {
+    // Need to cast from U* to T* to do pointer adjustment in case of multiple
+    // inheritance. This also enforces the "U is a T" rule.
+    T* t = reinterpret_cast<U*>(other.ptr_);
+    ptr_ = reinterpret_cast<uintptr_t>(t);
+  }
+
+  // Returns the pointer if the referent is still alive, else nullptr. Must
+  // be called on the bound sequence (see the file-level comment).
+  T* get() const {
+    return ref_.IsValid() ? reinterpret_cast<T*>(ptr_) : nullptr;
+  }
+
+  T& operator*() const {
+    DCHECK(get() != nullptr);
+    return *get();
+  }
+  T* operator->() const {
+    DCHECK(get() != nullptr);
+    return get();
+  }
+
+  // Allow conditionals to test validity, e.g. if (weak_ptr) {...};
+  explicit operator bool() const { return get() != nullptr; }
+
+  // Returns false if the WeakPtr is confirmed to be invalid. This call is safe
+  // to make from any thread, e.g. to optimize away unnecessary work, but
+  // operator bool() must always be called, on the correct sequence, before
+  // actually using the pointer.
+  //
+  // Warning: as with any object, this call is only thread-safe if the WeakPtr
+  // instance isn't being re-assigned or reset() racily with this call.
+  bool MaybeValid() const { return ref_.MaybeValid(); }
+
+  // Returns whether the object |this| points to has been invalidated. This can
+  // be used to distinguish a WeakPtr to a destroyed object from one that has
+  // been explicitly set to null.
+  bool WasInvalidated() const { return ptr_ && !ref_.IsValid(); }
+
+ private:
+  friend class internal::SupportsWeakPtrBase;
+  template <typename U> friend class WeakPtr;
+  friend class SupportsWeakPtr<T>;
+  friend class WeakPtrFactory<T>;
+
+  // Binds this WeakPtr to |ref|'s validity flag; reachable only via the
+  // friends above (factories and conversion helpers).
+  WeakPtr(const internal::WeakReference& ref, T* ptr)
+      : WeakPtrBase(ref, reinterpret_cast<uintptr_t>(ptr)) {}
+};
+
+// Allow callers to compare WeakPtrs against nullptr to test validity.
+template <class T>
+bool operator==(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+  return weak_ptr.get() == nullptr;
+}
+template <class T>
+bool operator==(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+  return weak_ptr.get() == nullptr;
+}
+template <class T>
+bool operator!=(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+  return weak_ptr.get() != nullptr;
+}
+template <class T>
+bool operator!=(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+  return weak_ptr.get() != nullptr;
+}
+
+namespace internal {
+// Non-template base carrying WeakPtrFactory<T>'s state, so the ctor/dtor
+// logic is not re-instantiated for every T.
+class BASE_EXPORT WeakPtrFactoryBase {
+ protected:
+  WeakPtrFactoryBase(uintptr_t ptr);
+  ~WeakPtrFactoryBase();
+  internal::WeakReferenceOwner weak_reference_owner_;
+  uintptr_t ptr_;
+};
+} // namespace internal
+
+// A class may be composed of a WeakPtrFactory and thereby
+// control how it exposes weak pointers to itself. This is helpful if you only
+// need weak pointers within the implementation of a class. This class is also
+// useful when working with primitive types. For example, you could have a
+// WeakPtrFactory<bool> that is used to pass around a weak reference to a bool.
+template <class T>
+class WeakPtrFactory : public internal::WeakPtrFactoryBase {
+ public:
+  explicit WeakPtrFactory(T* ptr)
+      : WeakPtrFactoryBase(reinterpret_cast<uintptr_t>(ptr)) {}
+
+  ~WeakPtrFactory() = default;
+
+  // Returns a new WeakPtr to |ptr_|, tied to this factory's validity flag.
+  WeakPtr<T> GetWeakPtr() {
+    return WeakPtr<T>(weak_reference_owner_.GetRef(),
+                      reinterpret_cast<T*>(ptr_));
+  }
+
+  // Call this method to invalidate all existing weak pointers.
+  // (New WeakPtrs may still be handed out afterwards via GetWeakPtr().)
+  void InvalidateWeakPtrs() {
+    DCHECK(ptr_);
+    weak_reference_owner_.Invalidate();
+  }
+
+  // Call this method to determine if any weak pointers exist.
+  bool HasWeakPtrs() const {
+    DCHECK(ptr_);
+    return weak_reference_owner_.HasRefs();
+  }
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory);
+};
+
+// A class may extend from SupportsWeakPtr to let others take weak pointers to
+// it. This avoids the class itself implementing boilerplate to dispense weak
+// pointers. However, since SupportsWeakPtr's destructor won't invalidate
+// weak pointers to the class until after the derived class' members have been
+// destroyed, its use can lead to subtle use-after-destroy issues.
+template <class T>
+class SupportsWeakPtr : public internal::SupportsWeakPtrBase {
+ public:
+  SupportsWeakPtr() = default;
+
+  // Returns a WeakPtr<T> bound to this object's validity flag.
+  WeakPtr<T> AsWeakPtr() {
+    return WeakPtr<T>(weak_reference_owner_.GetRef(), static_cast<T*>(this));
+  }
+
+ protected:
+  // Protected and non-virtual: instances must not be destroyed through a
+  // SupportsWeakPtr<T>* (see the use-after-destroy caveat above this class).
+  ~SupportsWeakPtr() = default;
+
+ private:
+  internal::WeakReferenceOwner weak_reference_owner_;
+  DISALLOW_COPY_AND_ASSIGN(SupportsWeakPtr);
+};
+
+// Helper function that uses type deduction to safely return a WeakPtr<Derived>
+// when Derived doesn't directly extend SupportsWeakPtr<Derived>, instead it
+// extends a Base that extends SupportsWeakPtr<Base>.
+//
+// EXAMPLE:
+// class Base : public base::SupportsWeakPtr<Producer> {};
+// class Derived : public Base {};
+//
+// Derived derived;
+// base::WeakPtr<Derived> ptr = base::AsWeakPtr(&derived);
+//
+// Note that the following doesn't work (invalid type conversion) since
+// Derived::AsWeakPtr() is WeakPtr<Base> SupportsWeakPtr<Base>::AsWeakPtr(),
+// and there's no way to safely cast WeakPtr<Base> to WeakPtr<Derived> at
+// the caller.
+//
+// base::WeakPtr<Derived> ptr = derived.AsWeakPtr(); // Fails.
+
+template <typename Derived>
+WeakPtr<Derived> AsWeakPtr(Derived* t) {
+  // Delegates to SupportsWeakPtrBase, which deduces the SupportsWeakPtr<Base>
+  // that Derived inherits from and performs the safe downcast.
+  return internal::SupportsWeakPtrBase::StaticAsWeakPtr<Derived>(t);
+}
+
+} // namespace base
+
+#endif // BASE_MEMORY_WEAK_PTR_H_
diff --git a/security/sandbox/chromium/base/no_destructor.h b/security/sandbox/chromium/base/no_destructor.h
new file mode 100644
index 0000000000..21cfef8565
--- /dev/null
+++ b/security/sandbox/chromium/base/no_destructor.h
@@ -0,0 +1,98 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NO_DESTRUCTOR_H_
+#define BASE_NO_DESTRUCTOR_H_
+
+#include <new>
+#include <utility>
+
+namespace base {
+
+// A wrapper that makes it easy to create an object of type T with static
+// storage duration that:
+// - is only constructed on first access
+// - never invokes the destructor
+// in order to satisfy the styleguide ban on global constructors and
+// destructors.
+//
+// Runtime constant example:
+// const std::string& GetLineSeparator() {
+// // Forwards to std::string(size_t, char, const Allocator&) constructor.
+// static const base::NoDestructor<std::string> s(5, '-');
+// return *s;
+// }
+//
+// More complex initialization with a lambda:
+// const std::string& GetSessionNonce() {
+// static const base::NoDestructor<std::string> nonce([] {
+//     std::string s(16, '\0');
+// crypto::RandString(s.data(), s.size());
+// return s;
+// }());
+// return *nonce;
+// }
+//
+// NoDestructor<T> stores the object inline, so it also avoids a pointer
+// indirection and a malloc. Also note that since C++11 static local variable
+// initialization is thread-safe and so is this pattern. Code should prefer to
+// use NoDestructor<T> over:
+// - A function scoped static T* or T& that is dynamically initialized.
+// - A global base::LazyInstance<T>.
+//
+// Note that since the destructor is never run, this *will* leak memory if used
+// as a stack or member variable. Furthermore, a NoDestructor<T> should never
+// have global scope as that may require a static initializer.
+template <typename T>
+class NoDestructor {
+ public:
+  // Not constexpr; just write static constexpr T x = ...; if the value should
+  // be a constexpr.
+  template <typename... Args>
+  explicit NoDestructor(Args&&... args) {
+    new (storage_) T(std::forward<Args>(args)...);
+  }
+
+  // Allows copy and move construction of the contained type, to allow
+  // construction from an initializer list, e.g. for std::vector.
+  explicit NoDestructor(const T& x) { new (storage_) T(x); }
+  explicit NoDestructor(T&& x) { new (storage_) T(std::move(x)); }
+
+  NoDestructor(const NoDestructor&) = delete;
+  NoDestructor& operator=(const NoDestructor&) = delete;
+
+  // Intentionally does not destroy the contained T: storage_ is a raw byte
+  // array, so T's destructor is never invoked.
+  ~NoDestructor() = default;
+
+  const T& operator*() const { return *get(); }
+  T& operator*() { return *get(); }
+
+  const T* operator->() const { return get(); }
+  T* operator->() { return get(); }
+
+  // NOTE(review): under C++17 these casts could be wrapped in std::launder to
+  // formally bless access to the placement-new'd object — confirm the build's
+  // language mode before changing.
+  const T* get() const { return reinterpret_cast<const T*>(storage_); }
+  T* get() { return reinterpret_cast<T*>(storage_); }
+
+ private:
+  // Raw, suitably aligned storage for the T constructed via placement new.
+  alignas(T) char storage_[sizeof(T)];
+
+#if defined(LEAK_SANITIZER)
+  // TODO(https://crbug.com/812277): This is a hack to work around the fact
+  // that LSan doesn't seem to treat NoDestructor as a root for reachability
+  // analysis. This means that code like this:
+  //   static base::NoDestructor<std::vector<int>> v({1, 2, 3});
+  // is considered a leak. Using the standard leak sanitizer annotations to
+  // suppress leaks doesn't work: std::vector is implicitly constructed before
+  // calling the base::NoDestructor constructor.
+  //
+  // Unfortunately, I haven't been able to demonstrate this issue in simpler
+  // reproductions: until that's resolved, hold an explicit pointer to the
+  // placement-new'd object in leak sanitizer mode to help LSan realize that
+  // objects allocated by the contained type are still reachable.
+  T* storage_ptr_ = reinterpret_cast<T*>(storage_);
+#endif  // defined(LEAK_SANITIZER)
+};
+
+} // namespace base
+
+#endif // BASE_NO_DESTRUCTOR_H_
diff --git a/security/sandbox/chromium/base/numerics/checked_math.h b/security/sandbox/chromium/base/numerics/checked_math.h
new file mode 100644
index 0000000000..ede3344f82
--- /dev/null
+++ b/security/sandbox/chromium/base/numerics/checked_math.h
@@ -0,0 +1,393 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CHECKED_MATH_H_
+#define BASE_NUMERICS_CHECKED_MATH_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/checked_math_impl.h"
+
+namespace base {
+namespace internal {
+
+// CheckedNumeric<T> wraps an arithmetic value together with a validity bit.
+// Every operation propagates validity, so an overflow anywhere in a chain of
+// computations poisons the final result instead of producing a wrapped value.
+template <typename T>
+class CheckedNumeric {
+  static_assert(std::is_arithmetic<T>::value,
+                "CheckedNumeric<T>: T must be a numeric type.");
+
+ public:
+  using type = T;
+
+  constexpr CheckedNumeric() = default;
+
+  // Copy constructor.
+  template <typename Src>
+  constexpr CheckedNumeric(const CheckedNumeric<Src>& rhs)
+      : state_(rhs.state_.value(), rhs.IsValid()) {}
+
+  template <typename Src>
+  friend class CheckedNumeric;
+
+  // This is not an explicit constructor because we implicitly upgrade regular
+  // numerics to CheckedNumerics to make them easier to use.
+  template <typename Src>
+  constexpr CheckedNumeric(Src value)  // NOLINT(runtime/explicit)
+      : state_(value) {
+    static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+  }
+
+  // This is not an explicit constructor because we want a seamless conversion
+  // from StrictNumeric types.
+  template <typename Src>
+  constexpr CheckedNumeric(
+      StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
+      : state_(static_cast<Src>(value)) {}
+
+  // IsValid() - The public API to test if a CheckedNumeric is currently valid.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter.
+  template <typename Dst = T>
+  constexpr bool IsValid() const {
+    return state_.is_valid() &&
+           IsValueInRangeForNumericType<Dst>(state_.value());
+  }
+
+  // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
+  // and is within the range supported by the destination type. Returns true if
+  // successful and false otherwise.
+  template <typename Dst>
+#if defined(__clang__) || defined(__GNUC__)
+  __attribute__((warn_unused_result))
+#elif defined(_MSC_VER)
+  _Check_return_
+#endif
+  constexpr bool
+  AssignIfValid(Dst* result) const {
+    return BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? ((*result = static_cast<Dst>(state_.value())), true)
+               : false;
+  }
+
+  // ValueOrDie() - The primary accessor for the underlying value. If the
+  // current state is not valid it will CHECK and crash.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter, which will trigger a CHECK if the value is not in bounds for
+  // the destination.
+  // The CHECK behavior can be overridden by supplying a handler as a
+  // template parameter, for test code, etc. However, the handler cannot access
+  // the underlying value, and it is not available through other means.
+  template <typename Dst = T, class CheckHandler = CheckOnFailure>
+  constexpr StrictNumeric<Dst> ValueOrDie() const {
+    return BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? static_cast<Dst>(state_.value())
+               : CheckHandler::template HandleFailure<Dst>();
+  }
+
+  // ValueOrDefault(T default_value) - A convenience method that returns the
+  // current value if the state is valid, and the supplied default_value for
+  // any other state.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter. WARNING: This function may fail to compile or CHECK at runtime
+  // if the supplied default_value is not within range of the destination type.
+  template <typename Dst = T, typename Src>
+  constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const {
+    return BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? static_cast<Dst>(state_.value())
+               : checked_cast<Dst>(default_value);
+  }
+
+  // Returns a checked numeric of the specified type, cast from the current
+  // CheckedNumeric. If the current state is invalid or the destination cannot
+  // represent the result then the returned CheckedNumeric will be invalid.
+  template <typename Dst>
+  constexpr CheckedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
+    return *this;
+  }
+
+  // This friend method is available solely for providing more detailed logging
+  // in the tests. Do not implement it in production code, because the
+  // underlying values may change at any time.
+  template <typename U>
+  friend U GetNumericValueForTest(const CheckedNumeric<U>& src);
+
+  // Prototypes for the supported arithmetic operator overloads.
+  template <typename Src>
+  constexpr CheckedNumeric& operator+=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator-=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator*=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator/=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator%=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator<<=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator>>=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator&=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator|=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator^=(const Src rhs);
+
+  constexpr CheckedNumeric operator-() const {
+    // The negation of two's complement int min is int min, so we simply
+    // check for that in the constexpr case.
+    // We use an optimized code path for a known run-time variable.
+    return MustTreatAsConstexpr(state_.value()) || !std::is_signed<T>::value ||
+                   std::is_floating_point<T>::value
+               ? CheckedNumeric<T>(
+                     NegateWrapper(state_.value()),
+                     IsValid() && (!std::is_signed<T>::value ||
+                                   std::is_floating_point<T>::value ||
+                                   NegateWrapper(state_.value()) !=
+                                       std::numeric_limits<T>::lowest()))
+               : FastRuntimeNegate();
+  }
+
+  constexpr CheckedNumeric operator~() const {
+    return CheckedNumeric<decltype(InvertWrapper(T()))>(
+        InvertWrapper(state_.value()), IsValid());
+  }
+
+  constexpr CheckedNumeric Abs() const {
+    return !IsValueNegative(state_.value()) ? *this : -*this;
+  }
+
+  template <typename U>
+  constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(
+      const U rhs) const {
+    using R = typename UnderlyingType<U>::type;
+    using result_type = typename MathWrapper<CheckedMaxOp, T, U>::type;
+    // TODO(jschuh): This can be converted to the MathOp version and remain
+    // constexpr once we have C++14 support.
+    return CheckedNumeric<result_type>(
+        static_cast<result_type>(
+            IsGreater<T, R>::Test(state_.value(), Wrapper<U>::value(rhs))
+                ? state_.value()
+                : Wrapper<U>::value(rhs)),
+        state_.is_valid() && Wrapper<U>::is_valid(rhs));
+  }
+
+  template <typename U>
+  constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(
+      const U rhs) const {
+    using R = typename UnderlyingType<U>::type;
+    using result_type = typename MathWrapper<CheckedMinOp, T, U>::type;
+    // TODO(jschuh): This can be converted to the MathOp version and remain
+    // constexpr once we have C++14 support.
+    return CheckedNumeric<result_type>(
+        static_cast<result_type>(
+            IsLess<T, R>::Test(state_.value(), Wrapper<U>::value(rhs))
+                ? state_.value()
+                : Wrapper<U>::value(rhs)),
+        state_.is_valid() && Wrapper<U>::is_valid(rhs));
+  }
+
+  // This function is available only for integral types. It returns an unsigned
+  // integer of the same width as the source type, containing the absolute value
+  // of the source, and properly handling signed min.
+  constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>
+  UnsignedAbs() const {
+    return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
+        SafeUnsignedAbs(state_.value()), state_.is_valid());
+  }
+
+  constexpr CheckedNumeric& operator++() {
+    *this += 1;
+    return *this;
+  }
+
+  constexpr CheckedNumeric operator++(int) {
+    CheckedNumeric value = *this;
+    *this += 1;
+    return value;
+  }
+
+  constexpr CheckedNumeric& operator--() {
+    *this -= 1;
+    return *this;
+  }
+
+  constexpr CheckedNumeric operator--(int) {
+    CheckedNumeric value = *this;
+    *this -= 1;
+    return value;
+  }
+
+  // These perform the actual math operations on the CheckedNumerics.
+  // Binary arithmetic operations.
+  template <template <typename, typename, typename> class M,
+            typename L,
+            typename R>
+  static constexpr CheckedNumeric MathOp(const L lhs, const R rhs) {
+    using Math = typename MathWrapper<M, L, R>::math;
+    T result = 0;
+    bool is_valid =
+        Wrapper<L>::is_valid(lhs) && Wrapper<R>::is_valid(rhs) &&
+        Math::Do(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs), &result);
+    return CheckedNumeric<T>(result, is_valid);
+  }
+
+  // Assignment arithmetic operations.
+  template <template <typename, typename, typename> class M, typename R>
+  constexpr CheckedNumeric& MathOp(const R rhs) {
+    using Math = typename MathWrapper<M, T, R>::math;
+    T result = 0;  // Using T as the destination saves a range check.
+    bool is_valid = state_.is_valid() && Wrapper<R>::is_valid(rhs) &&
+                    Math::Do(state_.value(), Wrapper<R>::value(rhs), &result);
+    *this = CheckedNumeric<T>(result, is_valid);
+    return *this;
+  }
+
+ private:
+  // Bundles the current value with its validity bit.
+  CheckedNumericState<T> state_;
+
+  // Non-constexpr negation via a checked (0 - value); the optimized run-time
+  // path used by operator- above.
+  CheckedNumeric FastRuntimeNegate() const {
+    T result;
+    bool success = CheckedSubOp<T, T>::Do(T(0), state_.value(), &result);
+    return CheckedNumeric<T>(result, IsValid() && success);
+  }
+
+  // Internal constructor from an already-computed value/validity pair.
+  template <typename Src>
+  constexpr CheckedNumeric(Src value, bool is_valid)
+      : state_(value, is_valid) {}
+
+  // These wrappers allow us to handle state the same way for both
+  // CheckedNumeric and POD arithmetic types.
+  template <typename Src>
+  struct Wrapper {
+    static constexpr bool is_valid(Src) { return true; }
+    static constexpr Src value(Src value) { return value; }
+  };
+
+  template <typename Src>
+  struct Wrapper<CheckedNumeric<Src>> {
+    static constexpr bool is_valid(const CheckedNumeric<Src> v) {
+      return v.IsValid();
+    }
+    static constexpr Src value(const CheckedNumeric<Src> v) {
+      return v.state_.value();
+    }
+  };
+
+  template <typename Src>
+  struct Wrapper<StrictNumeric<Src>> {
+    static constexpr bool is_valid(const StrictNumeric<Src>) { return true; }
+    static constexpr Src value(const StrictNumeric<Src> v) {
+      return static_cast<Src>(v);
+    }
+  };
+};
+
+// Convenience functions to avoid the ugly template disambiguator syntax.
+template <typename Dst, typename Src>
+constexpr bool IsValidForType(const CheckedNumeric<Src> value) {
+  return value.template IsValid<Dst>();
+}
+
+template <typename Dst, typename Src>
+constexpr StrictNumeric<Dst> ValueOrDieForType(
+    const CheckedNumeric<Src> value) {
+  return value.template ValueOrDie<Dst>();
+}
+
+template <typename Dst, typename Src, typename Default>
+constexpr StrictNumeric<Dst> ValueOrDefaultForType(
+    const CheckedNumeric<Src> value,
+    const Default default_value) {
+  return value.template ValueOrDefault<Dst>(default_value);
+}
+
+// Convenience wrapper to return a new CheckedNumeric from the provided
+// arithmetic or CheckedNumericType.
+template <typename T>
+constexpr CheckedNumeric<typename UnderlyingType<T>::type> MakeCheckedNum(
+    const T value) {
+  return value;
+}
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R>
+constexpr CheckedNumeric<typename MathWrapper<M, L, R>::type> CheckMathOp(
+    const L lhs,
+    const R rhs) {
+  using Math = typename MathWrapper<M, L, R>::math;
+  return CheckedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
+                                                                        rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+// Applies the operation as a left fold, i.e.
+// CheckMathOp(a, b, c) == CheckMathOp(CheckMathOp(a, b), c).
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R,
+          typename... Args>
+constexpr CheckedNumeric<typename ResultType<M, L, R, Args...>::type>
+CheckMathOp(const L lhs, const R rhs, const Args... args) {
+  return CheckMathOp<M>(CheckMathOp<M>(lhs, rhs), args...);
+}
+
+// Instantiate the full set of checked arithmetic entry points (the free
+// Check* functions plus the operator/operator= overloads) via the shared
+// macros — presumably defined in the safe-math impl headers pulled in above.
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Add, +, +=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Sub, -, -=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mul, *, *=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Div, /, /=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mod, %, %=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Lsh, <<, <<=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Rsh, >>, >>=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, And, &, &=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Or, |, |=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Xor, ^, ^=)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Max)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Min)
+
+// These are some extra StrictNumeric operators to support simple pointer
+// arithmetic with our result types. Since wrapping on a pointer is always
+// bad, we trigger the CHECK condition here.
+template <typename L, typename R>
+L* operator+(L* lhs, const StrictNumeric<R> rhs) {
+  // Computes lhs + rhs * sizeof(L) in uintptr_t; ValueOrDie() CHECKs on
+  // overflow instead of wrapping.
+  uintptr_t result = CheckAdd(reinterpret_cast<uintptr_t>(lhs),
+                              CheckMul(sizeof(L), static_cast<R>(rhs)))
+                         .template ValueOrDie<uintptr_t>();
+  return reinterpret_cast<L*>(result);
+}
+
+template <typename L, typename R>
+L* operator-(L* lhs, const StrictNumeric<R> rhs) {
+  // Same as operator+ above, but with a checked subtraction of the byte
+  // offset.
+  uintptr_t result = CheckSub(reinterpret_cast<uintptr_t>(lhs),
+                              CheckMul(sizeof(L), static_cast<R>(rhs)))
+                         .template ValueOrDie<uintptr_t>();
+  return reinterpret_cast<L*>(result);
+}
+
+} // namespace internal
+
+// Export the public checked-math API from internal:: into base:: proper.
+using internal::CheckedNumeric;
+using internal::IsValidForType;
+using internal::ValueOrDieForType;
+using internal::ValueOrDefaultForType;
+using internal::MakeCheckedNum;
+using internal::CheckMax;
+using internal::CheckMin;
+using internal::CheckAdd;
+using internal::CheckSub;
+using internal::CheckMul;
+using internal::CheckDiv;
+using internal::CheckMod;
+using internal::CheckLsh;
+using internal::CheckRsh;
+using internal::CheckAnd;
+using internal::CheckOr;
+using internal::CheckXor;
+
+} // namespace base
+
+#endif // BASE_NUMERICS_CHECKED_MATH_H_
diff --git a/security/sandbox/chromium/base/numerics/checked_math_impl.h b/security/sandbox/chromium/base/numerics/checked_math_impl.h
new file mode 100644
index 0000000000..e083389ebf
--- /dev/null
+++ b/security/sandbox/chromium/base/numerics/checked_math_impl.h
@@ -0,0 +1,567 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CHECKED_MATH_IMPL_H_
+#define BASE_NUMERICS_CHECKED_MATH_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math_shared_impl.h"
+
+namespace base {
+namespace internal {
+
+template <typename T>
+constexpr bool CheckedAddImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ // Since the value of x+y is undefined if we have a signed type, we compute
+ // it using the unsigned type of the same size.
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
+ UnsignedDst ux = static_cast<UnsignedDst>(x);
+ UnsignedDst uy = static_cast<UnsignedDst>(y);
+ UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
+ *result = static_cast<T>(uresult);
+ // Addition is valid if the sign of (x + y) is equal to either that of x or
+ // that of y.
+ return (std::is_signed<T>::value)
+ ? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) >= 0
+ : uresult >= uy; // Unsigned is either valid or underflow.
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAddOp {};
+
+template <typename T, typename U>
+struct CheckedAddOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V* result) {
+ // TODO(jschuh) Make this "constexpr if" once we're C++17.
+ if (CheckedAddFastOp<T, U>::is_supported)
+ return CheckedAddFastOp<T, U>::Do(x, y, result);
+
+ // Double the underlying type up to a full machine word.
+ using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ using Promotion =
+ typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
+ IntegerBitsPlusSign<intptr_t>::value),
+ typename BigEnoughPromotion<T, U>::type,
+ FastPromotion>::type;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ if (BASE_NUMERICS_UNLIKELY(!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y))) {
+ return false;
+ }
+
+ Promotion presult = {};
+ bool is_valid = true;
+ if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
+ } else {
+ is_valid = CheckedAddImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ }
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
+ }
+};
+
+template <typename T>
+constexpr bool CheckedSubImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+  // Since the value of x-y is undefined if we have a signed type, we compute
+ // it using the unsigned type of the same size.
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
+ UnsignedDst ux = static_cast<UnsignedDst>(x);
+ UnsignedDst uy = static_cast<UnsignedDst>(y);
+ UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
+ *result = static_cast<T>(uresult);
+ // Subtraction is valid if either x and y have same sign, or (x-y) and x have
+ // the same sign.
+ return (std::is_signed<T>::value)
+ ? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) >= 0
+ : x >= y;
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedSubOp {};
+
+template <typename T, typename U>
+struct CheckedSubOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V* result) {
+ // TODO(jschuh) Make this "constexpr if" once we're C++17.
+ if (CheckedSubFastOp<T, U>::is_supported)
+ return CheckedSubFastOp<T, U>::Do(x, y, result);
+
+ // Double the underlying type up to a full machine word.
+ using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ using Promotion =
+ typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
+ IntegerBitsPlusSign<intptr_t>::value),
+ typename BigEnoughPromotion<T, U>::type,
+ FastPromotion>::type;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ if (BASE_NUMERICS_UNLIKELY(!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y))) {
+ return false;
+ }
+
+ Promotion presult = {};
+ bool is_valid = true;
+ if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
+ } else {
+ is_valid = CheckedSubImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ }
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
+ }
+};
+
+template <typename T>
+constexpr bool CheckedMulImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ // Since the value of x*y is potentially undefined if we have a signed type,
+ // we compute it using the unsigned type of the same size.
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
+ const UnsignedDst ux = SafeUnsignedAbs(x);
+ const UnsignedDst uy = SafeUnsignedAbs(y);
+ UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
+ const bool is_negative =
+ std::is_signed<T>::value && static_cast<SignedDst>(x ^ y) < 0;
+ *result = is_negative ? 0 - uresult : uresult;
+ // We have a fast out for unsigned identity or zero on the second operand.
+ // After that it's an unsigned overflow check on the absolute value, with
+ // a +1 bound for a negative result.
+ return uy <= UnsignedDst(!std::is_signed<T>::value || is_negative) ||
+ ux <= (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy;
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedMulOp {};
+
+template <typename T, typename U>
+struct CheckedMulOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V* result) {
+ // TODO(jschuh) Make this "constexpr if" once we're C++17.
+ if (CheckedMulFastOp<T, U>::is_supported)
+ return CheckedMulFastOp<T, U>::Do(x, y, result);
+
+ using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ // Verify the destination type can hold the result (always true for 0).
+ if (BASE_NUMERICS_UNLIKELY((!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y)) &&
+ x && y)) {
+ return false;
+ }
+
+ Promotion presult = {};
+ bool is_valid = true;
+ if (CheckedMulFastOp<Promotion, Promotion>::is_supported) {
+ // The fast op may be available with the promoted type.
+ is_valid = CheckedMulFastOp<Promotion, Promotion>::Do(x, y, &presult);
+ } else if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
+ } else {
+ is_valid = CheckedMulImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ }
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
+ }
+};
+
+// Division just requires a check for a zero denominator or an invalid negation
+// on signed min/-1.
+template <typename T, typename U, class Enable = void>
+struct CheckedDivOp {};
+
+template <typename T, typename U>
+struct CheckedDivOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V* result) {
+ if (BASE_NUMERICS_UNLIKELY(!y))
+ return false;
+
+ // The overflow check can be compiled away if we don't have the exact
+ // combination of types needed to trigger this case.
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ if (BASE_NUMERICS_UNLIKELY(
+ (std::is_signed<T>::value && std::is_signed<U>::value &&
+ IsTypeInRangeForNumericType<T, Promotion>::value &&
+ static_cast<Promotion>(x) ==
+ std::numeric_limits<Promotion>::lowest() &&
+ y == static_cast<U>(-1)))) {
+ return false;
+ }
+
+ // This branch always compiles away if the above branch wasn't removed.
+ if (BASE_NUMERICS_UNLIKELY((!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y)) &&
+ x)) {
+ return false;
+ }
+
+ Promotion presult = Promotion(x) / Promotion(y);
+ *result = static_cast<V>(presult);
+ return IsValueInRangeForNumericType<V>(presult);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedModOp {};
+
+template <typename T, typename U>
+struct CheckedModOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V* result) {
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ if (BASE_NUMERICS_LIKELY(y)) {
+ Promotion presult = static_cast<Promotion>(x) % static_cast<Promotion>(y);
+ *result = static_cast<Promotion>(presult);
+ return IsValueInRangeForNumericType<V>(presult);
+ }
+ return false;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedLshOp {};
+
+// Left shift. Shifts less than 0 or greater than or equal to the number
+// of bits in the promoted type are undefined. Shifts of negative values
+// are undefined. Otherwise it is defined when the result fits.
+template <typename T, typename U>
+struct CheckedLshOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = T;
+ template <typename V>
+ static constexpr bool Do(T x, U shift, V* result) {
+ // Disallow negative numbers and verify the shift is in bounds.
+ if (BASE_NUMERICS_LIKELY(!IsValueNegative(x) &&
+ as_unsigned(shift) <
+ as_unsigned(std::numeric_limits<T>::digits))) {
+ // Shift as unsigned to avoid undefined behavior.
+ *result = static_cast<V>(as_unsigned(x) << shift);
+ // If the shift can be reversed, we know it was valid.
+ return *result >> shift == x;
+ }
+
+ // Handle the legal corner-case of a full-width signed shift of zero.
+ return std::is_signed<T>::value && !x &&
+ as_unsigned(shift) == as_unsigned(std::numeric_limits<T>::digits);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedRshOp {};
+
+// Right shift. Shifts less than 0 or greater than or equal to the number
+// of bits in the promoted type are undefined. Otherwise, it is always defined,
+// but a right shift of a negative value is implementation-dependent.
+template <typename T, typename U>
+struct CheckedRshOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = T;
+ template <typename V>
+ static bool Do(T x, U shift, V* result) {
+    // Use the type conversion to push negative values out of range.
+ if (BASE_NUMERICS_LIKELY(as_unsigned(shift) <
+ IntegerBitsPlusSign<T>::value)) {
+ T tmp = x >> shift;
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+ return false;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAndOp {};
+
+// For simplicity we support only unsigned integer results.
+template <typename T, typename U>
+struct CheckedAndOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V* result) {
+ result_type tmp = static_cast<result_type>(x) & static_cast<result_type>(y);
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedOrOp {};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedOrOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V* result) {
+ result_type tmp = static_cast<result_type>(x) | static_cast<result_type>(y);
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedXorOp {};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedXorOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V* result) {
+ result_type tmp = static_cast<result_type>(x) ^ static_cast<result_type>(y);
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+};
+
+// Max doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMaxOp {};
+
+template <typename T, typename U>
+struct CheckedMaxOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value &&
+ std::is_arithmetic<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V* result) {
+ result_type tmp = IsGreater<T, U>::Test(x, y) ? static_cast<result_type>(x)
+ : static_cast<result_type>(y);
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+};
+
+// Min doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMinOp {};
+
+template <typename T, typename U>
+struct CheckedMinOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value &&
+ std::is_arithmetic<U>::value>::type> {
+ using result_type = typename LowestValuePromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V* result) {
+ result_type tmp = IsLess<T, U>::Test(x, y) ? static_cast<result_type>(x)
+ : static_cast<result_type>(y);
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+};
+
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
+ template <typename T, typename U> \
+ struct Checked##NAME##Op< \
+ T, U, \
+ typename std::enable_if<std::is_floating_point<T>::value || \
+ std::is_floating_point<U>::value>::type> { \
+ using result_type = typename MaxExponentPromotion<T, U>::type; \
+ template <typename V> \
+ static constexpr bool Do(T x, U y, V* result) { \
+ using Promotion = typename MaxExponentPromotion<T, U>::type; \
+ Promotion presult = x OP y; \
+ *result = static_cast<V>(presult); \
+ return IsValueInRangeForNumericType<V>(presult); \
+ } \
+ };
+
+BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef BASE_FLOAT_ARITHMETIC_OPS
+
+// Floats carry around their validity state with them, but integers do not. So,
+// we wrap the underlying value in a specialization in order to hide that detail
+// and expose an interface via accessors.
+enum NumericRepresentation {
+ NUMERIC_INTEGER,
+ NUMERIC_FLOATING,
+ NUMERIC_UNKNOWN
+};
+
+template <typename NumericType>
+struct GetNumericRepresentation {
+ static const NumericRepresentation value =
+ std::is_integral<NumericType>::value
+ ? NUMERIC_INTEGER
+ : (std::is_floating_point<NumericType>::value ? NUMERIC_FLOATING
+ : NUMERIC_UNKNOWN);
+};
+
+template <typename T,
+ NumericRepresentation type = GetNumericRepresentation<T>::value>
+class CheckedNumericState {};
+
+// Integrals require quite a bit of additional housekeeping to manage state.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_INTEGER> {
+ private:
+  // is_valid_ precedes value_ because member initializers in the constructors
+ // are evaluated in field order, and is_valid_ must be read when initializing
+ // value_.
+ bool is_valid_;
+ T value_;
+
+ // Ensures that a type conversion does not trigger undefined behavior.
+ template <typename Src>
+ static constexpr T WellDefinedConversionOrZero(const Src value,
+ const bool is_valid) {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return (std::is_integral<SrcType>::value || is_valid)
+ ? static_cast<T>(value)
+ : static_cast<T>(0);
+ }
+
+ public:
+ template <typename Src, NumericRepresentation type>
+ friend class CheckedNumericState;
+
+ constexpr CheckedNumericState() : is_valid_(true), value_(0) {}
+
+ template <typename Src>
+ constexpr CheckedNumericState(Src value, bool is_valid)
+ : is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
+ value_(WellDefinedConversionOrZero(value, is_valid_)) {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ }
+
+ // Copy constructor.
+ template <typename Src>
+ constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
+ : is_valid_(rhs.IsValid()),
+ value_(WellDefinedConversionOrZero(rhs.value(), is_valid_)) {}
+
+ template <typename Src>
+ constexpr explicit CheckedNumericState(Src value)
+ : is_valid_(IsValueInRangeForNumericType<T>(value)),
+ value_(WellDefinedConversionOrZero(value, is_valid_)) {}
+
+ constexpr bool is_valid() const { return is_valid_; }
+ constexpr T value() const { return value_; }
+};
+
+// Floating points maintain their own validity, but need translation wrappers.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_FLOATING> {
+ private:
+ T value_;
+
+ // Ensures that a type conversion does not trigger undefined behavior.
+ template <typename Src>
+ static constexpr T WellDefinedConversionOrNaN(const Src value,
+ const bool is_valid) {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return (StaticDstRangeRelationToSrcRange<T, SrcType>::value ==
+ NUMERIC_RANGE_CONTAINED ||
+ is_valid)
+ ? static_cast<T>(value)
+ : std::numeric_limits<T>::quiet_NaN();
+ }
+
+ public:
+ template <typename Src, NumericRepresentation type>
+ friend class CheckedNumericState;
+
+ constexpr CheckedNumericState() : value_(0.0) {}
+
+ template <typename Src>
+ constexpr CheckedNumericState(Src value, bool is_valid)
+ : value_(WellDefinedConversionOrNaN(value, is_valid)) {}
+
+ template <typename Src>
+ constexpr explicit CheckedNumericState(Src value)
+ : value_(WellDefinedConversionOrNaN(
+ value,
+ IsValueInRangeForNumericType<T>(value))) {}
+
+ // Copy constructor.
+ template <typename Src>
+ constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
+ : value_(WellDefinedConversionOrNaN(
+ rhs.value(),
+ rhs.is_valid() && IsValueInRangeForNumericType<T>(rhs.value()))) {}
+
+ constexpr bool is_valid() const {
+ // Written this way because std::isfinite is not reliably constexpr.
+ return MustTreatAsConstexpr(value_)
+ ? value_ <= std::numeric_limits<T>::max() &&
+ value_ >= std::numeric_limits<T>::lowest()
+ : std::isfinite(value_);
+ }
+ constexpr T value() const { return value_; }
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_NUMERICS_CHECKED_MATH_IMPL_H_
diff --git a/security/sandbox/chromium/base/numerics/clamped_math.h b/security/sandbox/chromium/base/numerics/clamped_math.h
new file mode 100644
index 0000000000..37a4cfd22a
--- /dev/null
+++ b/security/sandbox/chromium/base/numerics/clamped_math.h
@@ -0,0 +1,264 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CLAMPED_MATH_H_
+#define BASE_NUMERICS_CLAMPED_MATH_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/clamped_math_impl.h"
+
+namespace base {
+namespace internal {
+
+template <typename T>
+class ClampedNumeric {
+ static_assert(std::is_arithmetic<T>::value,
+ "ClampedNumeric<T>: T must be a numeric type.");
+
+ public:
+ using type = T;
+
+ constexpr ClampedNumeric() : value_(0) {}
+
+ // Copy constructor.
+ template <typename Src>
+ constexpr ClampedNumeric(const ClampedNumeric<Src>& rhs)
+ : value_(saturated_cast<T>(rhs.value_)) {}
+
+ template <typename Src>
+ friend class ClampedNumeric;
+
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to ClampedNumerics to make them easier to use.
+ template <typename Src>
+ constexpr ClampedNumeric(Src value) // NOLINT(runtime/explicit)
+ : value_(saturated_cast<T>(value)) {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ }
+
+ // This is not an explicit constructor because we want a seamless conversion
+ // from StrictNumeric types.
+ template <typename Src>
+ constexpr ClampedNumeric(
+ StrictNumeric<Src> value) // NOLINT(runtime/explicit)
+ : value_(saturated_cast<T>(static_cast<Src>(value))) {}
+
+ // Returns a ClampedNumeric of the specified type, cast from the current
+ // ClampedNumeric, and saturated to the destination type.
+ template <typename Dst>
+ constexpr ClampedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
+ return *this;
+ }
+
+ // Prototypes for the supported arithmetic operator overloads.
+ template <typename Src>
+ constexpr ClampedNumeric& operator+=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric& operator-=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric& operator*=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric& operator/=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric& operator%=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric& operator<<=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric& operator>>=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric& operator&=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric& operator|=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric& operator^=(const Src rhs);
+
+ constexpr ClampedNumeric operator-() const {
+ // The negation of two's complement int min is int min, so that's the
+ // only overflow case where we will saturate.
+ return ClampedNumeric<T>(SaturatedNegWrapper(value_));
+ }
+
+ constexpr ClampedNumeric operator~() const {
+ return ClampedNumeric<decltype(InvertWrapper(T()))>(InvertWrapper(value_));
+ }
+
+ constexpr ClampedNumeric Abs() const {
+ // The negation of two's complement int min is int min, so that's the
+ // only overflow case where we will saturate.
+ return ClampedNumeric<T>(SaturatedAbsWrapper(value_));
+ }
+
+ template <typename U>
+ constexpr ClampedNumeric<typename MathWrapper<ClampedMaxOp, T, U>::type> Max(
+ const U rhs) const {
+ using result_type = typename MathWrapper<ClampedMaxOp, T, U>::type;
+ return ClampedNumeric<result_type>(
+ ClampedMaxOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
+ }
+
+ template <typename U>
+ constexpr ClampedNumeric<typename MathWrapper<ClampedMinOp, T, U>::type> Min(
+ const U rhs) const {
+ using result_type = typename MathWrapper<ClampedMinOp, T, U>::type;
+ return ClampedNumeric<result_type>(
+ ClampedMinOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
+ }
+
+ // This function is available only for integral types. It returns an unsigned
+ // integer of the same width as the source type, containing the absolute value
+ // of the source, and properly handling signed min.
+ constexpr ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>
+ UnsignedAbs() const {
+ return ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>(
+ SafeUnsignedAbs(value_));
+ }
+
+ constexpr ClampedNumeric& operator++() {
+ *this += 1;
+ return *this;
+ }
+
+ constexpr ClampedNumeric operator++(int) {
+ ClampedNumeric value = *this;
+ *this += 1;
+ return value;
+ }
+
+ constexpr ClampedNumeric& operator--() {
+ *this -= 1;
+ return *this;
+ }
+
+ constexpr ClampedNumeric operator--(int) {
+ ClampedNumeric value = *this;
+ *this -= 1;
+ return value;
+ }
+
+ // These perform the actual math operations on the ClampedNumerics.
+ // Binary arithmetic operations.
+ template <template <typename, typename, typename> class M,
+ typename L,
+ typename R>
+ static constexpr ClampedNumeric MathOp(const L lhs, const R rhs) {
+ using Math = typename MathWrapper<M, L, R>::math;
+ return ClampedNumeric<T>(
+ Math::template Do<T>(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs)));
+ }
+
+ // Assignment arithmetic operations.
+ template <template <typename, typename, typename> class M, typename R>
+ constexpr ClampedNumeric& MathOp(const R rhs) {
+ using Math = typename MathWrapper<M, T, R>::math;
+ *this =
+ ClampedNumeric<T>(Math::template Do<T>(value_, Wrapper<R>::value(rhs)));
+ return *this;
+ }
+
+ template <typename Dst>
+ constexpr operator Dst() const {
+ return saturated_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(
+ value_);
+ }
+
+ // This method extracts the raw integer value without saturating it to the
+ // destination type as the conversion operator does. This is useful when
+ // e.g. assigning to an auto type or passing as a deduced template parameter.
+ constexpr T RawValue() const { return value_; }
+
+ private:
+ T value_;
+
+ // These wrappers allow us to handle state the same way for both
+ // ClampedNumeric and POD arithmetic types.
+ template <typename Src>
+ struct Wrapper {
+ static constexpr Src value(Src value) {
+ return static_cast<typename UnderlyingType<Src>::type>(value);
+ }
+ };
+};
+
+// Convenience wrapper to return a new ClampedNumeric from the provided arithmetic
+// or ClampedNumericType.
+template <typename T>
+constexpr ClampedNumeric<typename UnderlyingType<T>::type> MakeClampedNum(
+ const T value) {
+ return value;
+}
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const ClampedNumeric<T>& value) {
+ os << static_cast<T>(value);
+ return os;
+}
+#endif
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R>
+constexpr ClampedNumeric<typename MathWrapper<M, L, R>::type> ClampMathOp(
+ const L lhs,
+ const R rhs) {
+ using Math = typename MathWrapper<M, L, R>::math;
+ return ClampedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
+ rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R,
+ typename... Args>
+constexpr ClampedNumeric<typename ResultType<M, L, R, Args...>::type>
+ClampMathOp(const L lhs, const R rhs, const Args... args) {
+ return ClampMathOp<M>(ClampMathOp<M>(lhs, rhs), args...);
+}
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Add, +, +=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Sub, -, -=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mul, *, *=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Div, /, /=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mod, %, %=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Lsh, <<, <<=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Rsh, >>, >>=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, And, &, &=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Or, |, |=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Xor, ^, ^=)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Max)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Min)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLess, <)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLessOrEqual, <=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreater, >)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreaterOrEqual, >=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsEqual, ==)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsNotEqual, !=)
+
+} // namespace internal
+
+using internal::ClampedNumeric;
+using internal::MakeClampedNum;
+using internal::ClampMax;
+using internal::ClampMin;
+using internal::ClampAdd;
+using internal::ClampSub;
+using internal::ClampMul;
+using internal::ClampDiv;
+using internal::ClampMod;
+using internal::ClampLsh;
+using internal::ClampRsh;
+using internal::ClampAnd;
+using internal::ClampOr;
+using internal::ClampXor;
+
+} // namespace base
+
+#endif // BASE_NUMERICS_CLAMPED_MATH_H_
diff --git a/security/sandbox/chromium/base/numerics/clamped_math_impl.h b/security/sandbox/chromium/base/numerics/clamped_math_impl.h
new file mode 100644
index 0000000000..303a7e945a
--- /dev/null
+++ b/security/sandbox/chromium/base/numerics/clamped_math_impl.h
@@ -0,0 +1,341 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
+#define BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/checked_math.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math_shared_impl.h"
+
+namespace base {
+namespace internal {
+
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_signed<T>::value>::type* = nullptr>
+constexpr T SaturatedNegWrapper(T value) {
+ return MustTreatAsConstexpr(value) || !ClampedNegFastOp<T>::is_supported
+ ? (NegateWrapper(value) != std::numeric_limits<T>::lowest()
+ ? NegateWrapper(value)
+ : std::numeric_limits<T>::max())
+ : ClampedNegFastOp<T>::Do(value);
+}
+
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value &&
+ !std::is_signed<T>::value>::type* = nullptr>
+constexpr T SaturatedNegWrapper(T value) {
+ return T(0);
+}
+
+template <
+ typename T,
+ typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+constexpr T SaturatedNegWrapper(T value) {
+ return -value;
+}
+
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+constexpr T SaturatedAbsWrapper(T value) {
+ // The calculation below is a static identity for unsigned types, but for
+ // signed integer types it provides a non-branching, saturated absolute value.
+ // This works because SafeUnsignedAbs() returns an unsigned type, which can
+ // represent the absolute value of all negative numbers of an equal-width
+ // integer type. The call to IsValueNegative() then detects overflow in the
+ // special case of numeric_limits<T>::min(), by evaluating the bit pattern as
+ // a signed integer value. If it is the overflow case, we end up subtracting
+ // one from the unsigned result, thus saturating to numeric_limits<T>::max().
+ return static_cast<T>(SafeUnsignedAbs(value) -
+ IsValueNegative<T>(SafeUnsignedAbs(value)));
+}
+
+template <
+ typename T,
+ typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+constexpr T SaturatedAbsWrapper(T value) {
+ return value < 0 ? -value : value;
+}
+
+template <typename T, typename U, class Enable = void>
+struct ClampedAddOp {};
+
+template <typename T, typename U>
+struct ClampedAddOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y) {
+ if (ClampedAddFastOp<T, U>::is_supported)
+ return ClampedAddFastOp<T, U>::template Do<V>(x, y);
+
+ static_assert(std::is_same<V, result_type>::value ||
+ IsTypeInRangeForNumericType<U, V>::value,
+ "The saturation result cannot be determined from the "
+ "provided types.");
+ const V saturated = CommonMaxOrMin<V>(IsValueNegative(y));
+ V result = {};
+ return BASE_NUMERICS_LIKELY((CheckedAddOp<T, U>::Do(x, y, &result)))
+ ? result
+ : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedSubOp {};
+
+template <typename T, typename U>
+struct ClampedSubOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y) {
+ // TODO(jschuh) Make this "constexpr if" once we're C++17.
+ if (ClampedSubFastOp<T, U>::is_supported)
+ return ClampedSubFastOp<T, U>::template Do<V>(x, y);
+
+ static_assert(std::is_same<V, result_type>::value ||
+ IsTypeInRangeForNumericType<U, V>::value,
+ "The saturation result cannot be determined from the "
+ "provided types.");
+ const V saturated = CommonMaxOrMin<V>(!IsValueNegative(y));
+ V result = {};
+ return BASE_NUMERICS_LIKELY((CheckedSubOp<T, U>::Do(x, y, &result)))
+ ? result
+ : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMulOp {};
+
+template <typename T, typename U>
+struct ClampedMulOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y) {
+ // TODO(jschuh) Make this "constexpr if" once we're C++17.
+ if (ClampedMulFastOp<T, U>::is_supported)
+ return ClampedMulFastOp<T, U>::template Do<V>(x, y);
+
+ V result = {};
+ const V saturated =
+ CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
+ return BASE_NUMERICS_LIKELY((CheckedMulOp<T, U>::Do(x, y, &result)))
+ ? result
+ : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedDivOp {};
+
+template <typename T, typename U>
+struct ClampedDivOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y) {
+ V result = {};
+ if (BASE_NUMERICS_LIKELY((CheckedDivOp<T, U>::Do(x, y, &result))))
+ return result;
+ // Saturation goes to max, min, or NaN (if x is zero).
+ return x ? CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y))
+ : SaturationDefaultLimits<V>::NaN();
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedModOp {};
+
+template <typename T, typename U>
+struct ClampedModOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y) {
+ V result = {};
+ return BASE_NUMERICS_LIKELY((CheckedModOp<T, U>::Do(x, y, &result)))
+ ? result
+ : x;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedLshOp {};
+
+// Left shift. Non-zero values saturate in the direction of the sign. A zero
+// shifted by any value always results in zero.
+template <typename T, typename U>
+struct ClampedLshOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = T;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U shift) {
+ static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+ if (BASE_NUMERICS_LIKELY(shift < std::numeric_limits<T>::digits)) {
+ // Shift as unsigned to avoid undefined behavior.
+ V result = static_cast<V>(as_unsigned(x) << shift);
+ // If the shift can be reversed, we know it was valid.
+ if (BASE_NUMERICS_LIKELY(result >> shift == x))
+ return result;
+ }
+ return x ? CommonMaxOrMin<V>(IsValueNegative(x)) : 0;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedRshOp {};
+
+// Right shift. Negative values saturate to -1. Positive or 0 saturates to 0.
+template <typename T, typename U>
+struct ClampedRshOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = T;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U shift) {
+ static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+ // Signed right shift is odd, because it saturates to -1 or 0.
+ const V saturated = as_unsigned(V(0)) - IsValueNegative(x);
+ return BASE_NUMERICS_LIKELY(shift < IntegerBitsPlusSign<T>::value)
+ ? saturated_cast<V>(x >> shift)
+ : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedAndOp {};
+
+template <typename T, typename U>
+struct ClampedAndOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr V Do(T x, U y) {
+ return static_cast<result_type>(x) & static_cast<result_type>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedOrOp {};
+
+// For simplicity we promote to unsigned integers.
+template <typename T, typename U>
+struct ClampedOrOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr V Do(T x, U y) {
+ return static_cast<result_type>(x) | static_cast<result_type>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedXorOp {};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct ClampedXorOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr V Do(T x, U y) {
+ return static_cast<result_type>(x) ^ static_cast<result_type>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMaxOp {};
+
+template <typename T, typename U>
+struct ClampedMaxOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value &&
+ std::is_arithmetic<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y) {
+ return IsGreater<T, U>::Test(x, y) ? saturated_cast<V>(x)
+ : saturated_cast<V>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMinOp {};
+
+template <typename T, typename U>
+struct ClampedMinOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value &&
+ std::is_arithmetic<U>::value>::type> {
+ using result_type = typename LowestValuePromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y) {
+ return IsLess<T, U>::Test(x, y) ? saturated_cast<V>(x)
+ : saturated_cast<V>(y);
+ }
+};
+
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
+ template <typename T, typename U> \
+ struct Clamped##NAME##Op< \
+ T, U, \
+ typename std::enable_if<std::is_floating_point<T>::value || \
+ std::is_floating_point<U>::value>::type> { \
+ using result_type = typename MaxExponentPromotion<T, U>::type; \
+ template <typename V = result_type> \
+ static constexpr V Do(T x, U y) { \
+ return saturated_cast<V>(x OP y); \
+ } \
+ };
+
+BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef BASE_FLOAT_ARITHMETIC_OPS
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
diff --git a/security/sandbox/chromium/base/numerics/safe_conversions.h b/security/sandbox/chromium/base/numerics/safe_conversions.h
new file mode 100644
index 0000000000..b9636fec42
--- /dev/null
+++ b/security/sandbox/chromium/base/numerics/safe_conversions.h
@@ -0,0 +1,358 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions_impl.h"
+
+#if !defined(__native_client__) && (defined(__ARMEL__) || defined(__arch64__))
+#include "base/numerics/safe_conversions_arm_impl.h"
+#define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (1)
+#else
+#define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (0)
+#endif
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+#include <ostream>
+#endif
+
+namespace base {
+namespace internal {
+
+#if !BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp {
+ static const bool is_supported = false;
+ static constexpr Dst Do(Src) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<Dst>();
+ }
+};
+#endif // BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+#undef BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+
+// The following special case a few specific integer conversions where we can
+// eke out better performance than range checking.
+template <typename Dst, typename Src, typename Enable = void>
+struct IsValueInRangeFastOp {
+ static const bool is_supported = false;
+ static constexpr bool Do(Src value) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+// Signed to signed range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<
+ std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+ std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+ static const bool is_supported = true;
+
+ static constexpr bool Do(Src value) {
+ // Just downcast to the smaller type, sign extend it back to the original
+ // type, and then see if it matches the original value.
+ return value == static_cast<Dst>(value);
+ }
+};
+
+// Signed to unsigned range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<
+ std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+ !std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+ static const bool is_supported = true;
+
+ static constexpr bool Do(Src value) {
+ // We cast a signed as unsigned to overflow negative values to the top,
+ // then compare against whichever maximum is smaller, as our upper bound.
+ return as_unsigned(value) <= as_unsigned(CommonMax<Src, Dst>());
+ }
+};
+
+// Convenience function that returns true if the supplied value is in range
+// for the destination type.
+template <typename Dst, typename Src>
+constexpr bool IsValueInRangeForNumericType(Src value) {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return internal::IsValueInRangeFastOp<Dst, SrcType>::is_supported
+ ? internal::IsValueInRangeFastOp<Dst, SrcType>::Do(
+ static_cast<SrcType>(value))
+ : internal::DstRangeRelationToSrcRange<Dst>(
+ static_cast<SrcType>(value))
+ .IsValid();
+}
+
+// checked_cast<> is analogous to static_cast<> for numeric types,
+// except that it CHECKs that the specified numeric conversion will not
+// overflow or underflow. NaN source will always trigger a CHECK.
+template <typename Dst,
+ class CheckHandler = internal::CheckOnFailure,
+ typename Src>
+constexpr Dst checked_cast(Src value) {
+ // This throws a compile-time error on evaluating the constexpr if it can be
+ // determined at compile-time as failing, otherwise it will CHECK at runtime.
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return BASE_NUMERICS_LIKELY((IsValueInRangeForNumericType<Dst>(value)))
+ ? static_cast<Dst>(static_cast<SrcType>(value))
+ : CheckHandler::template HandleFailure<Dst>();
+}
+
+// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
+// You may provide your own limits (e.g. to saturated_cast) so long as you
+// implement all of the static constexpr member functions in the class below.
+template <typename T>
+struct SaturationDefaultLimits : public std::numeric_limits<T> {
+ static constexpr T NaN() {
+ return std::numeric_limits<T>::has_quiet_NaN
+ ? std::numeric_limits<T>::quiet_NaN()
+ : T();
+ }
+ using std::numeric_limits<T>::max;
+ static constexpr T Overflow() {
+ return std::numeric_limits<T>::has_infinity
+ ? std::numeric_limits<T>::infinity()
+ : std::numeric_limits<T>::max();
+ }
+ using std::numeric_limits<T>::lowest;
+ static constexpr T Underflow() {
+ return std::numeric_limits<T>::has_infinity
+ ? std::numeric_limits<T>::infinity() * -1
+ : std::numeric_limits<T>::lowest();
+ }
+};
+
+template <typename Dst, template <typename> class S, typename Src>
+constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint) {
+ // For some reason clang generates much better code when the branch is
+ // structured exactly this way, rather than a sequence of checks.
+ return !constraint.IsOverflowFlagSet()
+ ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
+ : S<Dst>::Underflow())
+ // Skip this check for integral Src, which cannot be NaN.
+ : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
+ ? S<Dst>::Overflow()
+ : S<Dst>::NaN());
+}
+
+// We can reduce the number of conditions and get slightly better performance
+// for normal signed and unsigned integer ranges. And in the specific case of
+// Arm, we can use the optimized saturation instructions.
+template <typename Dst, typename Src, typename Enable = void>
+struct SaturateFastOp {
+ static const bool is_supported = false;
+ static constexpr Dst Do(Src value) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<Dst>();
+ }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<std::is_integral<Src>::value &&
+ std::is_integral<Dst>::value &&
+ SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+ static const bool is_supported = true;
+ static Dst Do(Src value) { return SaturateFastAsmOp<Dst, Src>::Do(value); }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<std::is_integral<Src>::value &&
+ std::is_integral<Dst>::value &&
+ !SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+ static const bool is_supported = true;
+ static Dst Do(Src value) {
+ // The exact order of the following is structured to hit the correct
+ // optimization heuristics across compilers. Do not change without
+ // checking the emitted code.
+ Dst saturated = CommonMaxOrMin<Dst, Src>(
+ IsMaxInRangeForNumericType<Dst, Src>() ||
+ (!IsMinInRangeForNumericType<Dst, Src>() && IsValueNegative(value)));
+ return BASE_NUMERICS_LIKELY(IsValueInRangeForNumericType<Dst>(value))
+ ? static_cast<Dst>(value)
+ : saturated;
+ }
+};
+
+// saturated_cast<> is analogous to static_cast<> for numeric types, except
+// that the specified numeric conversion will saturate by default rather than
+// overflow or underflow, and NaN assignment to an integral will return 0.
+// All boundary condition behaviors can be overriden with a custom handler.
+template <typename Dst,
+ template <typename> class SaturationHandler = SaturationDefaultLimits,
+ typename Src>
+constexpr Dst saturated_cast(Src value) {
+ using SrcType = typename UnderlyingType<Src>::type;
+ return !IsCompileTimeConstant(value) &&
+ SaturateFastOp<Dst, SrcType>::is_supported &&
+ std::is_same<SaturationHandler<Dst>,
+ SaturationDefaultLimits<Dst>>::value
+ ? SaturateFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
+ : saturated_cast_impl<Dst, SaturationHandler, SrcType>(
+ static_cast<SrcType>(value),
+ DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(
+ static_cast<SrcType>(value)));
+}
+
+// strict_cast<> is analogous to static_cast<> for numeric types, except that
+// it will cause a compile failure if the destination type is not large enough
+// to contain any value in the source type. It performs no runtime checking.
+template <typename Dst, typename Src>
+constexpr Dst strict_cast(Src value) {
+ using SrcType = typename UnderlyingType<Src>::type;
+ static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // Alternatively, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value ==
+ NUMERIC_RANGE_CONTAINED,
+ "The source type is out of range for the destination type. "
+ "Please see strict_cast<> comments for more information.");
+
+ return static_cast<Dst>(static_cast<SrcType>(value));
+}
+
+// Some wrappers to statically check that a type is in range.
+template <typename Dst, typename Src, class Enable = void>
+struct IsNumericRangeContained {
+ static const bool value = false;
+};
+
+template <typename Dst, typename Src>
+struct IsNumericRangeContained<
+ Dst,
+ Src,
+ typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
+ ArithmeticOrUnderlyingEnum<Src>::value>::type> {
+ static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+ NUMERIC_RANGE_CONTAINED;
+};
+
+// StrictNumeric implements compile time range checking between numeric types by
+// wrapping assignment operations in a strict_cast. This class is intended to be
+// used for function arguments and return types, to ensure the destination type
+// can always contain the source type. This is essentially the same as enforcing
+// -Wconversion in gcc and C4302 warnings on MSVC, but it can be applied
+// incrementally at API boundaries, making it easier to convert code so that it
+// compiles cleanly with truncation warnings enabled.
+// This template should introduce no runtime overhead, but it also provides no
+// runtime checking of any of the associated mathematical operations. Use
+// CheckedNumeric for runtime range checks of the actual value being assigned.
+template <typename T>
+class StrictNumeric {
+ public:
+ using type = T;
+
+ constexpr StrictNumeric() : value_(0) {}
+
+ // Copy constructor.
+ template <typename Src>
+ constexpr StrictNumeric(const StrictNumeric<Src>& rhs)
+ : value_(strict_cast<T>(rhs.value_)) {}
+
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to StrictNumerics to make them easier to use.
+ template <typename Src>
+ constexpr StrictNumeric(Src value) // NOLINT(runtime/explicit)
+ : value_(strict_cast<T>(value)) {}
+
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // If you're assigning from a CheckedNumeric<> class, you may be able to use
+ // the AssignIfValid() member function, specify a narrower destination type to
+ // the member value functions (e.g. val.template ValueOrDie<Dst>()), use one
+ // of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
+ // If you've encountered an _ambiguous overload_ you can use a static_cast<>
+ // to explicitly cast the result to the destination type.
+ // If none of that works, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ template <typename Dst,
+ typename std::enable_if<
+ IsNumericRangeContained<Dst, T>::value>::type* = nullptr>
+ constexpr operator Dst() const {
+ return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
+ }
+
+ private:
+ const T value_;
+};
+
+// Convience wrapper returns a StrictNumeric from the provided arithmetic type.
+template <typename T>
+constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
+ const T value) {
+ return value;
+}
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const StrictNumeric<T>& value) {
+ os << static_cast<T>(value);
+ return os;
+}
+#endif
+
+#define BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP) \
+ template <typename L, typename R, \
+ typename std::enable_if< \
+ internal::Is##CLASS##Op<L, R>::value>::type* = nullptr> \
+ constexpr bool operator OP(const L lhs, const R rhs) { \
+ return SafeCompare<NAME, typename UnderlyingType<L>::type, \
+ typename UnderlyingType<R>::type>(lhs, rhs); \
+ }
+
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLess, <)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLessOrEqual, <=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreater, >)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreaterOrEqual, >=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsEqual, ==)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsNotEqual, !=)
+
+} // namespace internal
+
+using internal::as_signed;
+using internal::as_unsigned;
+using internal::checked_cast;
+using internal::strict_cast;
+using internal::saturated_cast;
+using internal::SafeUnsignedAbs;
+using internal::StrictNumeric;
+using internal::MakeStrictNum;
+using internal::IsValueInRangeForNumericType;
+using internal::IsTypeInRangeForNumericType;
+using internal::IsValueNegative;
+
+// Explicitly make a shorter size_t alias for convenience.
+using SizeT = StrictNumeric<size_t>;
+
+} // namespace base
+
+#endif // BASE_NUMERICS_SAFE_CONVERSIONS_H_
diff --git a/security/sandbox/chromium/base/numerics/safe_conversions_arm_impl.h b/security/sandbox/chromium/base/numerics/safe_conversions_arm_impl.h
new file mode 100644
index 0000000000..cf31072b9b
--- /dev/null
+++ b/security/sandbox/chromium/base/numerics/safe_conversions_arm_impl.h
@@ -0,0 +1,51 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions_impl.h"
+
+namespace base {
+namespace internal {
+
+// Fast saturation to a destination type.
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp {
+ static constexpr bool is_supported =
+ std::is_signed<Src>::value && std::is_integral<Dst>::value &&
+ std::is_integral<Src>::value &&
+ IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
+ IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value;
+
+ __attribute__((always_inline)) static Dst Do(Src value) {
+ int32_t src = value;
+ typename std::conditional<std::is_signed<Dst>::value, int32_t,
+ uint32_t>::type result;
+ if (std::is_signed<Dst>::value) {
+ asm("ssat %[dst], %[shift], %[src]"
+ : [dst] "=r"(result)
+ : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
+ ? IntegerBitsPlusSign<Dst>::value
+ : 32));
+ } else {
+ asm("usat %[dst], %[shift], %[src]"
+ : [dst] "=r"(result)
+ : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value < 32
+ ? IntegerBitsPlusSign<Dst>::value
+ : 31));
+ }
+ return static_cast<Dst>(result);
+ }
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
diff --git a/security/sandbox/chromium/base/numerics/safe_conversions_impl.h b/security/sandbox/chromium/base/numerics/safe_conversions_impl.h
new file mode 100644
index 0000000000..7c5ca68c3b
--- /dev/null
+++ b/security/sandbox/chromium/base/numerics/safe_conversions_impl.h
@@ -0,0 +1,851 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+
+#include <stdint.h>
+
+#include <limits>
+#include <type_traits>
+
+#if defined(__GNUC__) || defined(__clang__)
+#define BASE_NUMERICS_LIKELY(x) __builtin_expect(!!(x), 1)
+#define BASE_NUMERICS_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define BASE_NUMERICS_LIKELY(x) (x)
+#define BASE_NUMERICS_UNLIKELY(x) (x)
+#endif
+
+namespace base {
+namespace internal {
+
+// The std library doesn't provide a binary max_exponent for integers, however
+// we can compute an analog using std::numeric_limits<>::digits. For integers
+// this is digits + 1 so that integral and floating-point exponents compare on
+// the same scale.
+template <typename NumericType>
+struct MaxExponent {
+  static const int value = std::is_floating_point<NumericType>::value
+                               ? std::numeric_limits<NumericType>::max_exponent
+                               : std::numeric_limits<NumericType>::digits + 1;
+};
+
+// The number of bits (including the sign) in an integer. Eliminates sizeof
+// hacks.
+template <typename NumericType>
+struct IntegerBitsPlusSign {
+  static const int value = std::numeric_limits<NumericType>::digits +
+                           std::is_signed<NumericType>::value;
+};
+
+// Helper templates for integer manipulations.
+
+// Zero-based bit position of the sign bit, e.g. 31 for int32_t.
+template <typename Integer>
+struct PositionOfSignBit {
+  static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
+};
+
+// Determines if a numeric value is negative without throwing compiler
+// warnings on: unsigned(value) < 0.
+template <typename T,
+          typename std::enable_if<std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T value) {
+  static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+  return value < 0;
+}
+
+// Unsigned overload: an unsigned value is never negative.
+template <typename T,
+          typename std::enable_if<!std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T) {
+  static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+  return false;
+}
+
+// This performs a fast negation, returning a signed value. It works on unsigned
+// arguments, but probably doesn't do what you want for any unsigned value
+// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
+template <typename T>
+constexpr typename std::make_signed<T>::type ConditionalNegate(
+    T x,
+    bool is_negative) {
+  static_assert(std::is_integral<T>::value, "Type must be integral");
+  using SignedT = typename std::make_signed<T>::type;
+  using UnsignedT = typename std::make_unsigned<T>::type;
+  // Branchless two's-complement negation: XOR with all-ones and add one when
+  // |is_negative|, XOR with zero and add zero otherwise. Performed in the
+  // unsigned domain so intermediate overflow is well defined.
+  return static_cast<SignedT>(
+      (static_cast<UnsignedT>(x) ^ -SignedT(is_negative)) + is_negative);
+}
+
+// This performs a safe, absolute value via unsigned overflow.
+template <typename T>
+constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
+  static_assert(std::is_integral<T>::value, "Type must be integral");
+  using UnsignedT = typename std::make_unsigned<T>::type;
+  // 0u - value computes the magnitude in unsigned arithmetic, avoiding the
+  // undefined behavior of negating the most negative signed value.
+  return IsValueNegative(value)
+             ? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
+             : static_cast<UnsignedT>(value);
+}
+
+// This allows us to switch paths on known compile-time constants.
+#if defined(__clang__) || defined(__GNUC__)
+constexpr bool CanDetectCompileTimeConstant() {
+  return true;
+}
+// Uses __builtin_constant_p, so only meaningful on GCC/Clang.
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T v) {
+  return __builtin_constant_p(v);
+}
+#else
+constexpr bool CanDetectCompileTimeConstant() {
+  return false;
+}
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T) {
+  return false;
+}
+#endif
+template <typename T>
+constexpr bool MustTreatAsConstexpr(const T v) {
+  // Either we can't detect a compile-time constant, and must always use the
+  // constexpr path, or we know we have a compile-time constant.
+  return !CanDetectCompileTimeConstant() || IsCompileTimeConstant(v);
+}
+
+// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
+// Also used in a constexpr template to trigger a compilation failure on
+// an error condition.
+struct CheckOnFailure {
+  template <typename T>
+  static T HandleFailure() {
+#if defined(_MSC_VER)
+    __debugbreak();
+#elif defined(__GNUC__) || defined(__clang__)
+    __builtin_trap();
+#else
+    // Fallback for toolchains without a trap intrinsic: crash via null write.
+    ((void)(*(volatile char*)0 = 0));
+#endif
+    return T();
+  }
+};
+
+// Signedness category used to select template specializations below.
+enum IntegerRepresentation {
+  INTEGER_REPRESENTATION_UNSIGNED,
+  INTEGER_REPRESENTATION_SIGNED
+};
+
+// A range for a given numeric Src type is contained for a given numeric Dst
+// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
+// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
+// We implement this as template specializations rather than simple static
+// comparisons to ensure type correctness in our comparisons.
+enum NumericRangeRepresentation {
+  NUMERIC_RANGE_NOT_CONTAINED,
+  NUMERIC_RANGE_CONTAINED
+};
+
+// Helper templates to statically determine if our destination type can contain
+// maximum and minimum values represented by the source type.
+
+template <typename Dst,
+          typename Src,
+          IntegerRepresentation DstSign = std::is_signed<Dst>::value
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED,
+          IntegerRepresentation SrcSign = std::is_signed<Src>::value
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED>
+struct StaticDstRangeRelationToSrcRange;
+
+// Same sign: Dst is guaranteed to contain Src only if its range is equal or
+// larger.
+template <typename Dst, typename Src, IntegerRepresentation Sign>
+struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign> {
+  static const NumericRangeRepresentation value =
+      MaxExponent<Dst>::value >= MaxExponent<Src>::value
+          ? NUMERIC_RANGE_CONTAINED
+          : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Unsigned to signed: Dst is guaranteed to contain source only if its range is
+// larger.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+                                        Src,
+                                        INTEGER_REPRESENTATION_SIGNED,
+                                        INTEGER_REPRESENTATION_UNSIGNED> {
+  static const NumericRangeRepresentation value =
+      MaxExponent<Dst>::value > MaxExponent<Src>::value
+          ? NUMERIC_RANGE_CONTAINED
+          : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Signed to unsigned: Dst cannot be statically determined to contain Src
+// (negative source values never fit).
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+                                        Src,
+                                        INTEGER_REPRESENTATION_UNSIGNED,
+                                        INTEGER_REPRESENTATION_SIGNED> {
+  static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// This class wraps the range constraints as separate booleans so the compiler
+// can identify constants and eliminate unused code paths. A value is valid
+// when neither flag is set; both flags set at once denotes an "invalid"
+// result (neither a plain overflow nor a plain underflow).
+class RangeCheck {
+ public:
+  constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
+      : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
+  constexpr RangeCheck() : is_underflow_(0), is_overflow_(0) {}
+  constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
+  constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
+  constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
+  constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
+  constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
+  constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
+  constexpr bool operator==(const RangeCheck rhs) const {
+    return is_underflow_ == rhs.is_underflow_ &&
+           is_overflow_ == rhs.is_overflow_;
+  }
+  constexpr bool operator!=(const RangeCheck rhs) const {
+    return !(*this == rhs);
+  }
+
+ private:
+  // Do not change the order of these member variables. The integral conversion
+  // optimization depends on this exact order.
+  const bool is_underflow_;
+  const bool is_overflow_;
+};
+
+// The following helper template addresses a corner case in range checks for
+// conversion from a floating-point type to an integral type of smaller range
+// but larger precision (e.g. float -> unsigned). The problem is as follows:
+// 1. Integral maximum is always one less than a power of two, so it must be
+//    truncated to fit the mantissa of the floating point. The direction of
+//    rounding is implementation defined, but by default it's always IEEE
+//    floats, which round to nearest and thus result in a value of larger
+//    magnitude than the integral value.
+//    Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
+//                                 // is 4294967295u.
+// 2. If the floating point value is equal to the promoted integral maximum
+//    value, a range check will erroneously pass.
+//    Example: (4294967296f <= 4294967295u) // This is true due to a precision
+//                                          // loss in rounding up to float.
+// 3. When the floating point value is then converted to an integral, the
+//    resulting value is out of range for the target integral type and
+//    thus is implementation defined.
+//    Example: unsigned u = (float)INT_MAX; // u will typically overflow to 0.
+// To fix this bug we manually truncate the maximum value when the destination
+// type is an integral of larger precision than the source floating-point type,
+// such that the resulting maximum is represented exactly as a floating point.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct NarrowingRange {
+  using SrcLimits = std::numeric_limits<Src>;
+  using DstLimits = typename std::numeric_limits<Dst>;
+
+  // Computes the mask required to make an accurate comparison between types.
+  // Non-zero only when Dst is an integral with more precision (digits) than
+  // the wider-ranged Src.
+  static const int kShift =
+      (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+       SrcLimits::digits < DstLimits::digits)
+          ? (DstLimits::digits - SrcLimits::digits)
+          : 0;
+
+  // Masks out the integer bits that are beyond the precision of the
+  // intermediate type used for comparison.
+  template <
+      typename T,
+      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  static constexpr T Adjust(T value) {
+    static_assert(std::is_same<T, Dst>::value, "");
+    static_assert(kShift < DstLimits::digits, "");
+    return static_cast<T>(
+        ConditionalNegate(SafeUnsignedAbs(value) & ~((T(1) << kShift) - T(1)),
+                          IsValueNegative(value)));
+  }
+
+  // Floating-point bounds need no adjustment (kShift is always 0 here).
+  template <typename T,
+            typename std::enable_if<std::is_floating_point<T>::value>::type* =
+                nullptr>
+  static constexpr T Adjust(T value) {
+    static_assert(std::is_same<T, Dst>::value, "");
+    static_assert(kShift == 0, "");
+    return value;
+  }
+
+  static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
+  static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
+};
+
+template <typename Dst,
+          typename Src,
+          template <typename> class Bounds,
+          IntegerRepresentation DstSign = std::is_signed<Dst>::value
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED,
+          IntegerRepresentation SrcSign = std::is_signed<Src>::value
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED,
+          NumericRangeRepresentation DstRange =
+              StaticDstRangeRelationToSrcRange<Dst, Src>::value>
+struct DstRangeRelationToSrcRangeImpl;
+
+// The following templates are for ranges that must be verified at runtime. We
+// split it into checks based on signedness to avoid confusing casts and
+// compiler warnings on signed and unsigned comparisons.
+
+// Same sign narrowing: The range is contained for normal limits.
+template <typename Dst,
+          typename Src,
+          template <typename> class Bounds,
+          IntegerRepresentation DstSign,
+          IntegerRepresentation SrcSign>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      DstSign,
+                                      SrcSign,
+                                      NUMERIC_RANGE_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using SrcLimits = std::numeric_limits<Src>;
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    // Each bound is checked against the value only when the static limit
+    // comparison can't already prove it in range (matters for custom Bounds).
+    return RangeCheck(
+        static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
+            static_cast<Dst>(value) >= DstLimits::lowest(),
+        static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
+            static_cast<Dst>(value) <= DstLimits::max());
+  }
+};
+
+// Signed to signed narrowing: Both the upper and lower boundaries may be
+// exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
+  }
+};
+
+// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
+// standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    // The lower-bound check only matters for custom Bounds with a non-zero
+    // lowest value.
+    return RangeCheck(
+        DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
+        value <= DstLimits::max());
+  }
+};
+
+// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    // Compare in the promoted type so the signed/unsigned mix is well defined.
+    using Promotion = decltype(Src() + Dst());
+    return RangeCheck(DstLimits::lowest() <= Dst(0) ||
+                          static_cast<Promotion>(value) >=
+                              static_cast<Promotion>(DstLimits::lowest()),
+                      static_cast<Promotion>(value) <=
+                          static_cast<Promotion>(DstLimits::max()));
+  }
+};
+
+// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
+// and any negative value exceeds the lower boundary for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using SrcLimits = std::numeric_limits<Src>;
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    using Promotion = decltype(Src() + Dst());
+    // Negative values always fail the lower bound; the cast to Dst for the
+    // lower-bound comparison is only reached when value is non-negative.
+    return RangeCheck(
+        value >= Src(0) && (DstLimits::lowest() == 0 ||
+                            static_cast<Dst>(value) >= DstLimits::lowest()),
+        static_cast<Promotion>(SrcLimits::max()) <=
+                static_cast<Promotion>(DstLimits::max()) ||
+            static_cast<Promotion>(value) <=
+                static_cast<Promotion>(DstLimits::max()));
+  }
+};
+
+// Simple wrapper for statically checking if a type's range is contained.
+template <typename Dst, typename Src>
+struct IsTypeInRangeForNumericType {
+  static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+                            NUMERIC_RANGE_CONTAINED;
+};
+
+// Runtime entry point: returns a RangeCheck describing how |value| relates to
+// the representable range of Dst (with numeric_limits bounds by default, or
+// a custom Bounds template for clamped ranges).
+template <typename Dst,
+          template <typename> class Bounds = std::numeric_limits,
+          typename Src>
+constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
+  static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+  static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+  static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
+  return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
+}
+
+// Integer promotion templates used by the portable checked integer arithmetic.
+// Maps a (bit width, signedness) pair back to the corresponding stdint type.
+template <size_t Size, bool IsSigned>
+struct IntegerForDigitsAndSign;
+
+#define INTEGER_FOR_DIGITS_AND_SIGN(I)                          \
+  template <>                                                   \
+  struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
+                                 std::is_signed<I>::value> {    \
+    using type = I;                                             \
+  }
+
+INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
+#undef INTEGER_FOR_DIGITS_AND_SIGN
+
+// WARNING: We have no IntegerForDigitsAndSign<128, *>. If we ever add one to
+// support 128-bit math, then the ArithmeticPromotion template below will need
+// to be updated (or more likely replaced with a decltype expression).
+static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
+              "Max integer size not supported for this toolchain.");
+
+// Yields the integer type with twice the bit width of the supplied Integer,
+// preserving its signedness unless overridden via IsSigned.
+template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
+struct TwiceWiderInteger {
+  using type =
+      typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
+                                       IsSigned>::type;
+};
+
+enum ArithmeticPromotionCategory {
+  LEFT_PROMOTION,  // Use the type of the left-hand argument.
+  RIGHT_PROMOTION  // Use the type of the right-hand argument.
+};
+
+// Determines the type that can represent the largest positive value.
+template <typename Lhs,
+          typename Rhs,
+          ArithmeticPromotionCategory Promotion =
+              (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+                  ? LEFT_PROMOTION
+                  : RIGHT_PROMOTION>
+struct MaxExponentPromotion;
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+  using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+  using type = Rhs;
+};
+
+// Determines the type that can represent the lowest arithmetic value.
+// Signed types always win over unsigned; among same-signedness types the one
+// with the wider exponent (for signed) or narrower (for unsigned) is chosen.
+template <typename Lhs,
+          typename Rhs,
+          ArithmeticPromotionCategory Promotion =
+              std::is_signed<Lhs>::value
+                  ? (std::is_signed<Rhs>::value
+                         ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
+                                ? LEFT_PROMOTION
+                                : RIGHT_PROMOTION)
+                         : LEFT_PROMOTION)
+                  : (std::is_signed<Rhs>::value
+                         ? RIGHT_PROMOTION
+                         : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
+                                ? LEFT_PROMOTION
+                                : RIGHT_PROMOTION))>
+struct LowestValuePromotion;
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION> {
+  using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+  using type = Rhs;
+};
+
+// Determines the type that is best able to represent an arithmetic result.
+// |is_intmax_type| is true when the max-exponent side is already the widest
+// integer available; |is_max_exponent| is true when that side's type contains
+// the ranges of both operands.
+template <
+    typename Lhs,
+    typename Rhs = Lhs,
+    bool is_intmax_type =
+        std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value&&
+            IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
+                value == IntegerBitsPlusSign<intmax_t>::value,
+    bool is_max_exponent =
+        StaticDstRangeRelationToSrcRange<
+            typename MaxExponentPromotion<Lhs, Rhs>::type,
+            Lhs>::value ==
+        NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
+            typename MaxExponentPromotion<Lhs, Rhs>::type,
+            Rhs>::value == NUMERIC_RANGE_CONTAINED>
+struct BigEnoughPromotion;
+
+// The side with the max exponent is big enough.
+template <typename Lhs, typename Rhs, bool is_intmax_type>
+struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
+  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+  static const bool is_contained = true;
+};
+
+// We can use a twice wider type to fit.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, false, false> {
+  using type =
+      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+                                 std::is_signed<Lhs>::value ||
+                                     std::is_signed<Rhs>::value>::type;
+  static const bool is_contained = true;
+};
+
+// No type is large enough.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, true, false> {
+  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+  static const bool is_contained = false;
+};
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. So, for an integer we
+// care if the destination type preserves the sign and is twice the width of
+// the source.
+template <typename T, typename Lhs, typename Rhs = Lhs>
+struct IsIntegerArithmeticSafe {
+  static const bool value =
+      !std::is_floating_point<T>::value &&
+      !std::is_floating_point<Lhs>::value &&
+      !std::is_floating_point<Rhs>::value &&
+      std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
+      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
+      std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
+      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
+};
+
+// Promotes to a type that can represent any possible result of a binary
+// arithmetic operation with the source types.
+template <typename Lhs,
+          typename Rhs,
+          bool is_promotion_possible = IsIntegerArithmeticSafe<
+              typename std::conditional<std::is_signed<Lhs>::value ||
+                                            std::is_signed<Rhs>::value,
+                                        intmax_t,
+                                        uintmax_t>::type,
+              typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
+struct FastIntegerArithmeticPromotion;
+
+// A twice-wider type exists, so arithmetic in it cannot wrap.
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
+  using type =
+      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+                                 std::is_signed<Lhs>::value ||
+                                     std::is_signed<Rhs>::value>::type;
+  static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
+  static const bool is_contained = true;
+};
+
+// No twice-wider type exists; fall back to the best-effort promotion.
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, false> {
+  using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
+  static const bool is_contained = false;
+};
+
+// Extracts the underlying type from an enum.
+template <typename T, bool is_enum = std::is_enum<T>::value>
+struct ArithmeticOrUnderlyingEnum;
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, true> {
+  using type = typename std::underlying_type<T>::type;
+  static const bool value = std::is_arithmetic<type>::value;
+};
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, false> {
+  using type = T;
+  static const bool value = std::is_arithmetic<type>::value;
+};
+
+// The following are helper templates used in the CheckedNumeric class.
+template <typename T>
+class CheckedNumeric;
+
+template <typename T>
+class ClampedNumeric;
+
+template <typename T>
+class StrictNumeric;
+
+// Used to treat CheckedNumeric and arithmetic underlying types the same.
+// The is_checked/is_clamped/is_strict flags identify which wrapper (if any)
+// the type is, so mixed-type operators can pick the right semantics.
+template <typename T>
+struct UnderlyingType {
+  using type = typename ArithmeticOrUnderlyingEnum<T>::type;
+  static const bool is_numeric = std::is_arithmetic<type>::value;
+  static const bool is_checked = false;
+  static const bool is_clamped = false;
+  static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<CheckedNumeric<T>> {
+  using type = T;
+  static const bool is_numeric = true;
+  static const bool is_checked = true;
+  static const bool is_clamped = false;
+  static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<ClampedNumeric<T>> {
+  using type = T;
+  static const bool is_numeric = true;
+  static const bool is_checked = false;
+  static const bool is_clamped = true;
+  static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<StrictNumeric<T>> {
+  using type = T;
+  static const bool is_numeric = true;
+  static const bool is_checked = false;
+  static const bool is_clamped = false;
+  static const bool is_strict = true;
+};
+
+// True when both operands are numeric and at least one is CheckedNumeric.
+template <typename L, typename R>
+struct IsCheckedOp {
+  static const bool value =
+      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+      (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+// True when at least one operand is ClampedNumeric and neither is checked
+// (checked semantics take precedence over clamped).
+template <typename L, typename R>
+struct IsClampedOp {
+  static const bool value =
+      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+      (UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped) &&
+      !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+// True when at least one operand is StrictNumeric and neither is checked or
+// clamped (both of which take precedence over strict).
+template <typename L, typename R>
+struct IsStrictOp {
+  static const bool value =
+      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+      (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict) &&
+      !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked) &&
+      !(UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped);
+};
+
+// as_signed<> returns the supplied integral value (or integral castable
+// Numeric template) cast as a signed integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_signed<T>::type>(t)
+// The static_assert rejects non-integral arguments at compile time.
+template <typename Src>
+constexpr typename std::make_signed<
+    typename base::internal::UnderlyingType<Src>::type>::type
+as_signed(const Src value) {
+  static_assert(std::is_integral<decltype(as_signed(value))>::value,
+                "Argument must be a signed or unsigned integer type.");
+  return static_cast<decltype(as_signed(value))>(value);
+}
+
+// as_unsigned<> returns the supplied integral value (or integral castable
+// Numeric template) cast as an unsigned integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_unsigned<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_unsigned<
+    typename base::internal::UnderlyingType<Src>::type>::type
+as_unsigned(const Src value) {
+  static_assert(std::is_integral<decltype(as_unsigned(value))>::value,
+                "Argument must be a signed or unsigned integer type.");
+  return static_cast<decltype(as_unsigned(value))>(value);
+}
+
+// Comparison implementations. Each operand is first located relative to the
+// other operand's type range (the RangeCheck arguments below); a direct
+// comparison in the arithmetically-promoted type is performed only when both
+// values sit in the same relative position, so the mixed-signedness cases are
+// decided by the range checks alone.
+template <typename L, typename R>
+constexpr bool IsLessImpl(const L lhs,
+                          const R rhs,
+                          const RangeCheck l_range,
+                          const RangeCheck r_range) {
+  return l_range.IsUnderflow() || r_range.IsOverflow() ||
+         (l_range == r_range &&
+          static_cast<decltype(lhs + rhs)>(lhs) <
+              static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLess {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                      DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+constexpr bool IsLessOrEqualImpl(const L lhs,
+                                 const R rhs,
+                                 const RangeCheck l_range,
+                                 const RangeCheck r_range) {
+  return l_range.IsUnderflow() || r_range.IsOverflow() ||
+         (l_range == r_range &&
+          static_cast<decltype(lhs + rhs)>(lhs) <=
+              static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLessOrEqual {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                             DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterImpl(const L lhs,
+                             const R rhs,
+                             const RangeCheck l_range,
+                             const RangeCheck r_range) {
+  return l_range.IsOverflow() || r_range.IsUnderflow() ||
+         (l_range == r_range &&
+          static_cast<decltype(lhs + rhs)>(lhs) >
+              static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreater {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                         DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterOrEqualImpl(const L lhs,
+                                    const R rhs,
+                                    const RangeCheck l_range,
+                                    const RangeCheck r_range) {
+  return l_range.IsOverflow() || r_range.IsUnderflow() ||
+         (l_range == r_range &&
+          static_cast<decltype(lhs + rhs)>(lhs) >=
+              static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreaterOrEqual {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                                DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+struct IsEqual {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    // Equal only when both values are in the same relative range position AND
+    // compare equal in the promoted type.
+    return DstRangeRelationToSrcRange<R>(lhs) ==
+               DstRangeRelationToSrcRange<L>(rhs) &&
+           static_cast<decltype(lhs + rhs)>(lhs) ==
+               static_cast<decltype(lhs + rhs)>(rhs);
+  }
+};
+
+template <typename L, typename R>
+struct IsNotEqual {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return DstRangeRelationToSrcRange<R>(lhs) !=
+               DstRangeRelationToSrcRange<L>(rhs) ||
+           static_cast<decltype(lhs + rhs)>(lhs) !=
+               static_cast<decltype(lhs + rhs)>(rhs);
+  }
+};
+
+// Performs a range-safe comparison (one of the Is* templates above) between
+// two arithmetic values of possibly different types. When a single type big
+// enough to contain both operand ranges exists, both operands are promoted to
+// it for a fast direct comparison; otherwise the comparison templates handle
+// the mixed-type logic via range checks.
+template <template <typename, typename> class C, typename L, typename R>
+constexpr bool SafeCompare(const L lhs, const R rhs) {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  using Promotion = BigEnoughPromotion<L, R>;
+  using BigType = typename Promotion::type;
+  return Promotion::is_contained
+             // Force to a larger type for speed if both are contained.
+             ? C<BigType, BigType>::Test(
+                   static_cast<BigType>(static_cast<L>(lhs)),
+                   static_cast<BigType>(static_cast<R>(rhs)))
+             // Let the template functions figure it out for mixed types.
+             : C<L, R>::Test(lhs, rhs);
+}
+
+// True if Dst can represent Src's maximum value.
+template <typename Dst, typename Src>
+constexpr bool IsMaxInRangeForNumericType() {
+  return IsGreaterOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::max(),
+                                          std::numeric_limits<Src>::max());
+}
+
+// True if Dst can represent Src's lowest value.
+template <typename Dst, typename Src>
+constexpr bool IsMinInRangeForNumericType() {
+  return IsLessOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::lowest(),
+                                       std::numeric_limits<Src>::lowest());
+}
+
+// The larger of the two types' maximums that still fits in Dst.
+template <typename Dst, typename Src>
+constexpr Dst CommonMax() {
+  return !IsMaxInRangeForNumericType<Dst, Src>()
+             ? Dst(std::numeric_limits<Dst>::max())
+             : Dst(std::numeric_limits<Src>::max());
+}
+
+// The smaller of the two types' lowest values that still fits in Dst.
+template <typename Dst, typename Src>
+constexpr Dst CommonMin() {
+  return !IsMinInRangeForNumericType<Dst, Src>()
+             ? Dst(std::numeric_limits<Dst>::lowest())
+             : Dst(std::numeric_limits<Src>::lowest());
+}
+
+// This is a wrapper that generates the max or min for a supplied type.
+// If the argument is false, the returned value is the maximum. If true the
+// returned value is the minimum.
+template <typename Dst, typename Src = Dst>
+constexpr Dst CommonMaxOrMin(bool is_min) {
+  return is_min ? CommonMin<Dst, Src>() : CommonMax<Dst, Src>();
+}
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
diff --git a/security/sandbox/chromium/base/numerics/safe_math.h b/security/sandbox/chromium/base/numerics/safe_math.h
new file mode 100644
index 0000000000..e30be901f9
--- /dev/null
+++ b/security/sandbox/chromium/base/numerics/safe_math.h
@@ -0,0 +1,12 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_H_
+#define BASE_NUMERICS_SAFE_MATH_H_
+
+#include "base/numerics/checked_math.h"
+#include "base/numerics/clamped_math.h"
+#include "base/numerics/safe_conversions.h"
+
+#endif // BASE_NUMERICS_SAFE_MATH_H_
diff --git a/security/sandbox/chromium/base/numerics/safe_math_arm_impl.h b/security/sandbox/chromium/base/numerics/safe_math_arm_impl.h
new file mode 100644
index 0000000000..ff86bd0b73
--- /dev/null
+++ b/security/sandbox/chromium/base/numerics/safe_math_arm_impl.h
@@ -0,0 +1,122 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions.h"
+
+namespace base {
+namespace internal {
+
// Overflow-checked multiply for 32-bit ARM: compute the exact product in a
// twice-as-wide promoted type, then range-check the narrowed result.
template <typename T, typename U>
struct CheckedMulFastAsmOp {
  // Only usable when a wide-enough promotion type exists (per the project's
  // FastIntegerArithmeticPromotion trait).
  static const bool is_supported =
      FastIntegerArithmeticPromotion<T, U>::is_contained;

  // The following is much more efficient than the Clang and GCC builtins for
  // performing overflow-checked multiplication when a twice wider type is
  // available. The below compiles down to 2-3 instructions, depending on the
  // width of the types in use.
  // As an example, an int32_t multiply compiles to:
  //    smull   r0, r1, r0, r1
  //    cmp     r1, r1, asr #31
  // And an int16_t multiply compiles to:
  //    smulbb  r1, r1, r0
  //    asr     r2, r1, #16
  //    cmp     r2, r1, asr #15
  template <typename V>
  __attribute__((always_inline)) static bool Do(T x, U y, V* result) {
    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
    Promotion presult;

    // Exact product in the promoted type; *result receives the (possibly
    // truncated) narrowed value either way, and the return value reports
    // whether the exact product fit in V.
    presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
    *result = static_cast<V>(presult);
    return IsValueInRangeForNumericType<V>(presult);
  }
};
+
// Saturating add for 32-bit ARM using the QADD instruction.
template <typename T, typename U>
struct ClampedAddFastAsmOp {
  // Usable only when both operands promote to a type no wider than int32_t,
  // since the asm below operates on 32-bit registers.
  static const bool is_supported =
      BigEnoughPromotion<T, U>::is_contained &&
      IsTypeInRangeForNumericType<
          int32_t,
          typename BigEnoughPromotion<T, U>::type>::value;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if (IsIntegerArithmeticSafe<int, T, U>::value)
      return saturated_cast<V>(x + y);

    int32_t result;
    int32_t x_i32 = checked_cast<int32_t>(x);
    int32_t y_i32 = checked_cast<int32_t>(y);

    // QADD is ARM's saturating signed 32-bit add; the saturated_cast then
    // clamps that 32-bit result into V.
    asm("qadd %[result], %[first], %[second]"
        : [result] "=r"(result)
        : [first] "r"(x_i32), [second] "r"(y_i32));
    return saturated_cast<V>(result);
  }
};

// Saturating subtract for 32-bit ARM using the QSUB instruction; mirror
// image of ClampedAddFastAsmOp above.
template <typename T, typename U>
struct ClampedSubFastAsmOp {
  static const bool is_supported =
      BigEnoughPromotion<T, U>::is_contained &&
      IsTypeInRangeForNumericType<
          int32_t,
          typename BigEnoughPromotion<T, U>::type>::value;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if (IsIntegerArithmeticSafe<int, T, U>::value)
      return saturated_cast<V>(x - y);

    int32_t result;
    int32_t x_i32 = checked_cast<int32_t>(x);
    int32_t y_i32 = checked_cast<int32_t>(y);

    // QSUB is ARM's saturating signed 32-bit subtract.
    asm("qsub %[result], %[first], %[second]"
        : [result] "=r"(result)
        : [first] "r"(x_i32), [second] "r"(y_i32));
    return saturated_cast<V>(result);
  }
};
+
// Saturating multiply for 32-bit ARM.
template <typename T, typename U>
struct ClampedMulFastAsmOp {
  static const bool is_supported = CheckedMulFastAsmOp<T, U>::is_supported;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // Use the CheckedMulFastAsmOp for full-width 32-bit values, because
    // it's fewer instructions than promoting and then saturating.
    if (!IsIntegerArithmeticSafe<int32_t, T, U>::value &&
        !IsIntegerArithmeticSafe<uint32_t, T, U>::value) {
      V result;
      if (CheckedMulFastAsmOp<T, U>::Do(x, y, &result))
        return result;
      // Overflowed: saturate toward the sign of the true product, which is
      // negative iff exactly one operand is negative (hence the XOR).
      return CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
    }

    // Narrow inputs: multiply exactly in the promoted type and clamp once.
    assert((FastIntegerArithmeticPromotion<T, U>::is_contained));
    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
    return saturated_cast<V>(static_cast<Promotion>(x) *
                             static_cast<Promotion>(y));
  }
};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
diff --git a/security/sandbox/chromium/base/numerics/safe_math_clang_gcc_impl.h b/security/sandbox/chromium/base/numerics/safe_math_clang_gcc_impl.h
new file mode 100644
index 0000000000..1760338b08
--- /dev/null
+++ b/security/sandbox/chromium/base/numerics/safe_math_clang_gcc_impl.h
@@ -0,0 +1,157 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions.h"
+
+#if !defined(__native_client__) && (defined(__ARMEL__) || defined(__arch64__))
+#include "base/numerics/safe_math_arm_impl.h"
+#define BASE_HAS_ASSEMBLER_SAFE_MATH (1)
+#else
+#define BASE_HAS_ASSEMBLER_SAFE_MATH (0)
+#endif
+
+namespace base {
+namespace internal {
+
+// These are the non-functioning boilerplate implementations of the optimized
+// safe math routines.
+#if !BASE_HAS_ASSEMBLER_SAFE_MATH
+template <typename T, typename U>
+struct CheckedMulFastAsmOp {
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V*) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastAsmOp {
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastAsmOp {
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastAsmOp {
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+#endif // BASE_HAS_ASSEMBLER_SAFE_MATH
+#undef BASE_HAS_ASSEMBLER_SAFE_MATH
+
// Overflow-checked addition via the compiler intrinsic. The wrapped sum is
// always stored through |sum|; the return value is true iff no overflow
// occurred.
template <typename T, typename U>
struct CheckedAddFastOp {
  static const bool is_supported = true;
  template <typename V>
  __attribute__((always_inline)) static constexpr bool Do(T lhs, U rhs,
                                                          V* sum) {
    return !__builtin_add_overflow(lhs, rhs, sum);
  }
};
+
// Overflow-checked subtraction via the compiler intrinsic. The wrapped
// difference is always stored through |diff|; the return value is true iff
// no overflow occurred.
template <typename T, typename U>
struct CheckedSubFastOp {
  static const bool is_supported = true;
  template <typename V>
  __attribute__((always_inline)) static constexpr bool Do(T lhs, U rhs,
                                                          V* diff) {
    return !__builtin_sub_overflow(lhs, rhs, diff);
  }
};
+
// Overflow-checked multiplication. Returns true iff x*y fits in V; *result
// receives the (possibly wrapped/truncated) product either way.
template <typename T, typename U>
struct CheckedMulFastOp {
#if defined(__clang__)
  // TODO(jschuh): Get the Clang runtime library issues sorted out so we can
  // support full-width, mixed-sign multiply builtins.
  // https://crbug.com/613003
  // We can support intptr_t, uintptr_t, or a smaller common type.
  static const bool is_supported =
      (IsTypeInRangeForNumericType<intptr_t, T>::value &&
       IsTypeInRangeForNumericType<intptr_t, U>::value) ||
      (IsTypeInRangeForNumericType<uintptr_t, T>::value &&
       IsTypeInRangeForNumericType<uintptr_t, U>::value);
#else
  static const bool is_supported = true;
#endif
  template <typename V>
  __attribute__((always_inline)) static constexpr bool Do(T x, U y,
                                                          V* result) {
    // Prefer the hand-tuned assembler path when available; otherwise fall
    // back to the compiler's overflow-checking builtin.
    return CheckedMulFastAsmOp<T, U>::is_supported
               ? CheckedMulFastAsmOp<T, U>::Do(x, y, result)
               : !__builtin_mul_overflow(x, y, result);
  }
};
+
+template <typename T, typename U>
+struct ClampedAddFastOp {
+ static const bool is_supported = ClampedAddFastAsmOp<T, U>::is_supported;
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y) {
+ return ClampedAddFastAsmOp<T, U>::template Do<V>(x, y);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastOp {
+ static const bool is_supported = ClampedSubFastAsmOp<T, U>::is_supported;
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y) {
+ return ClampedSubFastAsmOp<T, U>::template Do<V>(x, y);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastOp {
+ static const bool is_supported = ClampedMulFastAsmOp<T, U>::is_supported;
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y) {
+ return ClampedMulFastAsmOp<T, U>::template Do<V>(x, y);
+ }
+};
+
// Saturating negation; only supported for signed types.
template <typename T>
struct ClampedNegFastOp {
  static const bool is_supported = std::is_signed<T>::value;
  __attribute__((always_inline)) static T Do(T value) {
    // Use this when there is no assembler path available.
    if (!ClampedSubFastAsmOp<T, T>::is_supported) {
      T result;
      // 0 - value can overflow only for value == lowest(), in which case
      // the saturated result is max().
      return !__builtin_sub_overflow(T(0), value, &result)
                 ? result
                 : std::numeric_limits<T>::max();
    }

    // Otherwise compute 0 - value via ClampedSubFastOp, which forwards to
    // the saturating assembler subtraction.
    return ClampedSubFastOp<T, T>::template Do<T>(T(0), value);
  }
};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
diff --git a/security/sandbox/chromium/base/numerics/safe_math_shared_impl.h b/security/sandbox/chromium/base/numerics/safe_math_shared_impl.h
new file mode 100644
index 0000000000..3556b1ea81
--- /dev/null
+++ b/security/sandbox/chromium/base/numerics/safe_math_shared_impl.h
@@ -0,0 +1,240 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <cassert>
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions.h"
+
+#ifdef __asmjs__
+// Optimized safe math instructions are incompatible with asmjs.
+#define BASE_HAS_OPTIMIZED_SAFE_MATH (0)
+// Where available use builtin math overflow support on Clang and GCC.
+#elif !defined(__native_client__) && \
+ ((defined(__clang__) && \
+ ((__clang_major__ > 3) || \
+ (__clang_major__ == 3 && __clang_minor__ >= 4))) || \
+ (defined(__GNUC__) && __GNUC__ >= 5))
+#include "base/numerics/safe_math_clang_gcc_impl.h"
+#define BASE_HAS_OPTIMIZED_SAFE_MATH (1)
+#else
+#define BASE_HAS_OPTIMIZED_SAFE_MATH (0)
+#endif
+
+namespace base {
+namespace internal {
+
+// These are the non-functioning boilerplate implementations of the optimized
+// safe math routines.
+#if !BASE_HAS_OPTIMIZED_SAFE_MATH
+template <typename T, typename U>
+struct CheckedAddFastOp {
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V*) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct CheckedSubFastOp {
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V*) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct CheckedMulFastOp {
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V*) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastOp {
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastOp {
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastOp {
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T>
+struct ClampedNegFastOp {
+ static const bool is_supported = false;
+ static constexpr T Do(T) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<T>();
+ }
+};
+#endif // BASE_HAS_OPTIMIZED_SAFE_MATH
+#undef BASE_HAS_OPTIMIZED_SAFE_MATH
+
// Maps an integral type to its unsigned counterpart and leaves a
// floating-point type unchanged. The primary template is intentionally left
// undefined so that any other type fails to compile; this is used for
// UnsignedAbs, where floating-point instantiations must exist syntactically
// even though no SafeUnsignedAbs is provided for them.
template <typename T,
          bool IsInteger = std::is_integral<T>::value,
          bool IsFloat = std::is_floating_point<T>::value>
struct UnsignedOrFloatForSize;

// Integral case: the corresponding unsigned type.
template <typename T>
struct UnsignedOrFloatForSize<T, true, false> {
  using type = typename std::make_unsigned<T>::type;
};

// Floating-point case: identity.
template <typename T>
struct UnsignedOrFloatForSize<T, false, true> {
  using type = T;
};
+
+// Wrap the unary operations to allow SFINAE when instantiating integrals versus
+// floating points. These don't perform any overflow checking. Rather, they
+// exhibit well-defined overflow semantics and rely on the caller to detect
+// if an overflow occurred.
+
// Integral negation with well-defined wraparound: the arithmetic happens in
// the unsigned domain, so negating the most negative value wraps instead of
// invoking undefined behavior.
template <typename T,
          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
constexpr T NegateWrapper(T value) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const UnsignedT as_unsigned = static_cast<UnsignedT>(value);
  // Unsigned subtraction from zero; compiles to a plain NEG on Intel and
  // ordinary negation on ARM.
  return static_cast<T>(UnsignedT(0) - as_unsigned);
}

// Floating-point negation is always well-defined; just negate.
template <
    typename T,
    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
constexpr T NegateWrapper(T value) {
  return -value;
}
+
// Bitwise complement, returned as T's unsigned counterpart so the result is
// well-defined regardless of T's signedness.
template <typename T,
          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
constexpr typename std::make_unsigned<T>::type InvertWrapper(T value) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(~value);
}
+
// Integral absolute value, routed through SafeUnsignedAbs to avoid UB when
// negating the most negative value (NOTE(review): SafeUnsignedAbs is a
// project helper presumed to return the unsigned magnitude — the result is
// narrowed back to T here).
template <typename T,
          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
constexpr T AbsWrapper(T value) {
  return static_cast<T>(SafeUnsignedAbs(value));
}

// Floating-point absolute value.
template <
    typename T,
    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
constexpr T AbsWrapper(T value) {
  if (value < 0)
    return -value;
  return value;
}
+
// Binds a checked/clamped operation template M to the underlying numeric
// types of L and R (unwrapped via the project's UnderlyingType trait) and
// exposes both the bound operation and its result type.
template <template <typename, typename, typename> class M,
          typename L,
          typename R>
struct MathWrapper {
  // The third parameter of M is an enable_if-style slot; void selects the
  // primary usable instantiation.
  using math = M<typename UnderlyingType<L>::type,
                 typename UnderlyingType<R>::type,
                 void>;
  using type = typename math::result_type;
};
+
// These variadic templates work out the return types.
// TODO(jschuh): Rip all this out once we have C++14 non-trailing auto support.
// Forward declaration; defined by the base case and recursive case below.
template <template <typename, typename, typename> class M,
          typename L,
          typename R,
          typename... Args>
struct ResultType;

// Base case: the result type of applying M to exactly two operands.
template <template <typename, typename, typename> class M,
          typename L,
          typename R>
struct ResultType<M, L, R> {
  using type = typename MathWrapper<M, L, R>::type;
};

// Recursive case: left-fold — first combine L and R, then fold that result
// type into the remaining Args.
template <template <typename, typename, typename> class M,
          typename L,
          typename R,
          typename... Args>
struct ResultType {
  using type =
      typename ResultType<M, typename ResultType<M, L, R>::type, Args...>::type;
};
+
// The following macros are just boilerplate for the standard arithmetic
// operator overloads and variadic function templates. A macro isn't the nicest
// solution, but it beats rewriting these over and over again.

// Expands to a variadic free function (named CL_ABBR##OP_NAME, e.g.
// CheckAdd/ClampMul) that left-folds any number of arguments through the
// CLASS##OP_NAME##Op operation and returns the wrapping CLASS##Numeric of
// the folded result type.
#define BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)       \
  template <typename L, typename R, typename... Args>                   \
  constexpr CLASS##Numeric<                                             \
      typename ResultType<CLASS##OP_NAME##Op, L, R, Args...>::type>     \
      CL_ABBR##OP_NAME(const L lhs, const R rhs, const Args... args) {  \
    return CL_ABBR##MathOp<CLASS##OP_NAME##Op, L, R, Args...>(lhs, rhs, \
                                                              args...); \
  }

// Expands to (1) the binary operator OP, SFINAE-gated on Is##CLASS##Op so it
// participates only for the matching numeric wrapper types, (2) the
// out-of-line definition of the compound-assignment operator CMP_OP declared
// in CLASS##Numeric, and (3) the variadic helper above.
#define BASE_NUMERIC_ARITHMETIC_OPERATORS(CLASS, CL_ABBR, OP_NAME, OP, CMP_OP) \
  /* Binary arithmetic operator for all CLASS##Numeric operations. */          \
  template <typename L, typename R,                                            \
            typename std::enable_if<Is##CLASS##Op<L, R>::value>::type* =       \
                nullptr>                                                       \
  constexpr CLASS##Numeric<                                                    \
      typename MathWrapper<CLASS##OP_NAME##Op, L, R>::type>                    \
  operator OP(const L lhs, const R rhs) {                                      \
    return decltype(lhs OP rhs)::template MathOp<CLASS##OP_NAME##Op>(lhs,      \
                                                                     rhs);     \
  }                                                                            \
  /* Assignment arithmetic operator implementation from CLASS##Numeric. */     \
  template <typename L>                                                        \
  template <typename R>                                                        \
  constexpr CLASS##Numeric<L>& CLASS##Numeric<L>::operator CMP_OP(             \
      const R rhs) {                                                           \
    return MathOp<CLASS##OP_NAME##Op>(rhs);                                    \
  }                                                                            \
  /* Variadic arithmetic functions that return CLASS##Numeric. */              \
  BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
diff --git a/security/sandbox/chromium/base/optional.h b/security/sandbox/chromium/base/optional.h
new file mode 100644
index 0000000000..36ae36fc98
--- /dev/null
+++ b/security/sandbox/chromium/base/optional.h
@@ -0,0 +1,937 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OPTIONAL_H_
+#define BASE_OPTIONAL_H_
+
+#include <functional>
+#include <type_traits>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/template_util.h"
+
+namespace base {
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt_t
struct nullopt_t {
  // The explicit int constructor keeps nullopt_t from being default- or
  // brace-constructible, matching std::nullopt_t's requirements so that
  // `opt = {}` disengages rather than ambiguously constructing nullopt_t.
  constexpr explicit nullopt_t(int) {}
};

// Specification:
// http://en.cppreference.com/w/cpp/utility/optional/nullopt
constexpr nullopt_t nullopt(0);
+
+// Forward declaration, which is referred to by the following helpers.
+template <typename T>
+class Optional;
+
+namespace internal {
+
// Storage for the value plus the engaged flag. This primary template handles
// types that are NOT trivially destructible and therefore provides a
// destructor that destroys the value when engaged.
template <typename T, bool = std::is_trivially_destructible<T>::value>
struct OptionalStorageBase {
  // Initializing |empty_| here instead of using default member initializing
  // to avoid errors in g++ 4.8.
  constexpr OptionalStorageBase() : empty_('\0') {}

  template <class... Args>
  constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
      : is_populated_(true), value_(std::forward<Args>(args)...) {}

  // When T is not trivially destructible we must call its
  // destructor before deallocating its memory.
  // Note that this hides the (implicitly declared) move constructor, which
  // would be used for constexpr move constructor in OptionalStorage<T>.
  // It is needed iff T is trivially move constructible. However, the current
  // is_trivially_{copy,move}_constructible implementation requires
  // is_trivially_destructible (which looks a bug, cf:
  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51452 and
  // http://cplusplus.github.io/LWG/lwg-active.html#2116), so it is not
  // necessary for this case at the moment. Please see also the destructor
  // comment in "is_trivially_destructible = true" specialization below.
  ~OptionalStorageBase() {
    if (is_populated_)
      value_.~T();
  }

  // Placement-constructs the value. Must only be called while empty
  // (enforced by the DCHECK).
  template <class... Args>
  void Init(Args&&... args) {
    DCHECK(!is_populated_);
    ::new (&value_) T(std::forward<Args>(args)...);
    is_populated_ = true;
  }

  bool is_populated_ = false;
  union {
    // |empty_| exists so that the union will always be initialized, even when
    // it doesn't contain a value. Union members must be initialized for the
    // constructor to be 'constexpr'.
    char empty_;
    T value_;
  };
};

// Specialization for trivially destructible T: the implicitly-defined
// (trivial) destructor is correct, and intentionally leaving it undeclared
// preserves the implicit move constructor (see comment below).
template <typename T>
struct OptionalStorageBase<T, true /* trivially destructible */> {
  // Initializing |empty_| here instead of using default member initializing
  // to avoid errors in g++ 4.8.
  constexpr OptionalStorageBase() : empty_('\0') {}

  template <class... Args>
  constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
      : is_populated_(true), value_(std::forward<Args>(args)...) {}

  // When T is trivially destructible (i.e. its destructor does nothing) there
  // is no need to call it. Implicitly defined destructor is trivial, because
  // both members (bool and union containing only variants which are trivially
  // destructible) are trivially destructible.
  // Explicitly-defaulted destructor is also trivial, but do not use it here,
  // because it hides the implicit move constructor. It is needed to implement
  // constexpr move constructor in OptionalStorage iff T is trivially move
  // constructible. Note that, if T is trivially move constructible, the move
  // constructor of OptionalStorageBase<T> is also implicitly defined and it is
  // trivially move constructor. If T is not trivially move constructible,
  // "not declaring move constructor without destructor declaration" here means
  // "delete move constructor", which works because any move constructor of
  // OptionalStorage will not refer to it in that case.

  // Placement-constructs the value. Must only be called while empty
  // (enforced by the DCHECK).
  template <class... Args>
  void Init(Args&&... args) {
    DCHECK(!is_populated_);
    ::new (&value_) T(std::forward<Args>(args)...);
    is_populated_ = true;
  }

  bool is_populated_ = false;
  union {
    // |empty_| exists so that the union will always be initialized, even when
    // it doesn't contain a value. Union members must be initialized for the
    // constructor to be 'constexpr'.
    char empty_;
    T value_;
  };
};
+
+// Implement conditional constexpr copy and move constructors. These are
+// constexpr if is_trivially_{copy,move}_constructible<T>::value is true
+// respectively. If each is true, the corresponding constructor is defined as
+// "= default;", which generates a constexpr constructor (In this case,
+// the condition of constexpr-ness is satisfied because the base class also has
+// compiler generated constexpr {copy,move} constructors). Note that
+// placement-new is prohibited in constexpr.
// Selects copy/move constructor implementations based on whether T's are
// trivial; this primary template is the neither-trivial case.
template <typename T,
          bool = is_trivially_copy_constructible<T>::value,
          bool = std::is_trivially_move_constructible<T>::value>
struct OptionalStorage : OptionalStorageBase<T> {
  // This is no trivially {copy,move} constructible case. Other cases are
  // defined below as specializations.

  // Accessing the members of template base class requires explicit
  // declaration.
  using OptionalStorageBase<T>::is_populated_;
  using OptionalStorageBase<T>::value_;
  using OptionalStorageBase<T>::Init;

  // Inherit constructors (specifically, the in_place constructor).
  using OptionalStorageBase<T>::OptionalStorageBase;

  // User defined constructor deletes the default constructor.
  // Define it explicitly.
  OptionalStorage() = default;

  // Hand-written copy: placement-construct from |other| only when engaged.
  OptionalStorage(const OptionalStorage& other) {
    if (other.is_populated_)
      Init(other.value_);
  }

  // Hand-written move; |other| stays engaged (its value is moved-from).
  OptionalStorage(OptionalStorage&& other) noexcept(
      std::is_nothrow_move_constructible<T>::value) {
    if (other.is_populated_)
      Init(std::move(other.value_));
  }
};

// Trivially copy constructible but not trivially move constructible:
// default the copy constructor (keeping it constexpr) and hand-write move.
template <typename T>
struct OptionalStorage<T,
                       true /* trivially copy constructible */,
                       false /* trivially move constructible */>
    : OptionalStorageBase<T> {
  using OptionalStorageBase<T>::is_populated_;
  using OptionalStorageBase<T>::value_;
  using OptionalStorageBase<T>::Init;
  using OptionalStorageBase<T>::OptionalStorageBase;

  OptionalStorage() = default;
  OptionalStorage(const OptionalStorage& other) = default;

  OptionalStorage(OptionalStorage&& other) noexcept(
      std::is_nothrow_move_constructible<T>::value) {
    if (other.is_populated_)
      Init(std::move(other.value_));
  }
};

// Mirror image: trivially move constructible only, so default move and
// hand-write copy.
template <typename T>
struct OptionalStorage<T,
                       false /* trivially copy constructible */,
                       true /* trivially move constructible */>
    : OptionalStorageBase<T> {
  using OptionalStorageBase<T>::is_populated_;
  using OptionalStorageBase<T>::value_;
  using OptionalStorageBase<T>::Init;
  using OptionalStorageBase<T>::OptionalStorageBase;

  OptionalStorage() = default;
  OptionalStorage(OptionalStorage&& other) = default;

  OptionalStorage(const OptionalStorage& other) {
    if (other.is_populated_)
      Init(other.value_);
  }
};

template <typename T>
struct OptionalStorage<T,
                       true /* trivially copy constructible */,
                       true /* trivially move constructible */>
    : OptionalStorageBase<T> {
  // If both trivially {copy,move} constructible are true, it is not necessary
  // to use user-defined constructors. So, just inheriting constructors
  // from the base class works.
  using OptionalStorageBase<T>::OptionalStorageBase;
};
+
+// Base class to support conditionally usable copy-/move- constructors
+// and assign operators.
template <typename T>
class OptionalBase {
  // This class provides implementation rather than public API, so everything
  // should be hidden. Often we use composition, but we cannot in this case
  // because of C++ language restriction.
 protected:
  constexpr OptionalBase() = default;
  constexpr OptionalBase(const OptionalBase& other) = default;
  constexpr OptionalBase(OptionalBase&& other) = default;

  template <class... Args>
  constexpr explicit OptionalBase(in_place_t, Args&&... args)
      : storage_(in_place, std::forward<Args>(args)...) {}

  // Implementation of converting constructors.
  template <typename U>
  explicit OptionalBase(const OptionalBase<U>& other) {
    if (other.storage_.is_populated_)
      storage_.Init(other.storage_.value_);
  }

  template <typename U>
  explicit OptionalBase(OptionalBase<U>&& other) {
    if (other.storage_.is_populated_)
      storage_.Init(std::move(other.storage_.value_));
  }

  ~OptionalBase() = default;

  OptionalBase& operator=(const OptionalBase& other) {
    CopyAssign(other);
    return *this;
  }

  OptionalBase& operator=(OptionalBase&& other) noexcept(
      std::is_nothrow_move_assignable<T>::value&&
          std::is_nothrow_move_constructible<T>::value) {
    MoveAssign(std::move(other));
    return *this;
  }

  // Copies |other|'s value into this (constructing or assigning as needed);
  // disengages this when |other| is empty.
  template <typename U>
  void CopyAssign(const OptionalBase<U>& other) {
    if (other.storage_.is_populated_)
      InitOrAssign(other.storage_.value_);
    else
      FreeIfNeeded();
  }

  // Move analogue of CopyAssign; |other| remains engaged but moved-from.
  template <typename U>
  void MoveAssign(OptionalBase<U>&& other) {
    if (other.storage_.is_populated_)
      InitOrAssign(std::move(other.storage_.value_));
    else
      FreeIfNeeded();
  }

  // Assigns through T's assignment operator when already engaged, otherwise
  // placement-constructs a new value.
  template <typename U>
  void InitOrAssign(U&& value) {
    if (storage_.is_populated_)
      storage_.value_ = std::forward<U>(value);
    else
      storage_.Init(std::forward<U>(value));
  }

  // Destroys the contained value, if any, and marks the storage empty.
  void FreeIfNeeded() {
    if (!storage_.is_populated_)
      return;
    storage_.value_.~T();
    storage_.is_populated_ = false;
  }

  // For implementing conversion, allow access to other typed OptionalBase
  // class.
  template <typename U>
  friend class OptionalBase;

  OptionalStorage<T> storage_;
};
+
+// The following {Copy,Move}{Constructible,Assignable} structs are helpers to
+// implement constructor/assign-operator overloading. Specifically, if T is
+// is not movable but copyable, Optional<T>'s move constructor should not
+// participate in overload resolution. This inheritance trick implements that.
// Empty in the 'true' case; Optional inherits from these so the deleted
// member in each 'false' specialization removes the corresponding special
// member from Optional's overload set.
template <bool is_copy_constructible>
struct CopyConstructible {};

template <>
struct CopyConstructible<false> {
  constexpr CopyConstructible() = default;
  // Deleting the copy constructor here deletes Optional's via inheritance.
  constexpr CopyConstructible(const CopyConstructible&) = delete;
  constexpr CopyConstructible(CopyConstructible&&) = default;
  CopyConstructible& operator=(const CopyConstructible&) = default;
  CopyConstructible& operator=(CopyConstructible&&) = default;
};

template <bool is_move_constructible>
struct MoveConstructible {};

template <>
struct MoveConstructible<false> {
  constexpr MoveConstructible() = default;
  constexpr MoveConstructible(const MoveConstructible&) = default;
  // Deleting the move constructor here deletes Optional's via inheritance.
  constexpr MoveConstructible(MoveConstructible&&) = delete;
  MoveConstructible& operator=(const MoveConstructible&) = default;
  MoveConstructible& operator=(MoveConstructible&&) = default;
};

template <bool is_copy_assignable>
struct CopyAssignable {};

template <>
struct CopyAssignable<false> {
  constexpr CopyAssignable() = default;
  constexpr CopyAssignable(const CopyAssignable&) = default;
  constexpr CopyAssignable(CopyAssignable&&) = default;
  // Deleting copy-assignment here deletes Optional's via inheritance.
  CopyAssignable& operator=(const CopyAssignable&) = delete;
  CopyAssignable& operator=(CopyAssignable&&) = default;
};

template <bool is_move_assignable>
struct MoveAssignable {};

template <>
struct MoveAssignable<false> {
  constexpr MoveAssignable() = default;
  constexpr MoveAssignable(const MoveAssignable&) = default;
  constexpr MoveAssignable(MoveAssignable&&) = default;
  MoveAssignable& operator=(const MoveAssignable&) = default;
  // Deleting move-assignment here deletes Optional's via inheritance.
  MoveAssignable& operator=(MoveAssignable&&) = delete;
};
+
+// Helper to conditionally enable converting constructors and assign operators.
// True iff T can be constructed or converted from some cv/ref-qualified
// Optional<U>; used to disable Optional's converting constructors in that
// case (mirrors the std::optional specification).
template <typename T, typename U>
struct IsConvertibleFromOptional
    : std::integral_constant<
          bool,
          std::is_constructible<T, Optional<U>&>::value ||
              std::is_constructible<T, const Optional<U>&>::value ||
              std::is_constructible<T, Optional<U>&&>::value ||
              std::is_constructible<T, const Optional<U>&&>::value ||
              std::is_convertible<Optional<U>&, T>::value ||
              std::is_convertible<const Optional<U>&, T>::value ||
              std::is_convertible<Optional<U>&&, T>::value ||
              std::is_convertible<const Optional<U>&&, T>::value> {};

// Extends the above with assignability; used to disable Optional's
// converting assignment operators.
template <typename T, typename U>
struct IsAssignableFromOptional
    : std::integral_constant<
          bool,
          IsConvertibleFromOptional<T, U>::value ||
              std::is_assignable<T&, Optional<U>&>::value ||
              std::is_assignable<T&, const Optional<U>&>::value ||
              std::is_assignable<T&, Optional<U>&&>::value ||
              std::is_assignable<T&, const Optional<U>&&>::value> {};
+
+// Forward compatibility for C++17.
+// Introduce one more deeper nested namespace to avoid leaking using std::swap.
namespace swappable_impl {
using std::swap;

struct IsSwappableImpl {
  // Tests if swap can be called. Check<T&>(0) returns true_type iff swap
  // is available for T. Otherwise, Check's overload resolution falls back
  // to Check(...) declared below thanks to SFINAE, so returns false_type.
  template <typename T>
  static auto Check(int)
      -> decltype(swap(std::declval<T>(), std::declval<T>()), std::true_type());

  template <typename T>
  static std::false_type Check(...);
};
}  // namespace swappable_impl

// Detector trait: true iff an unqualified swap(T&, T&) (including std::swap
// via the using-declaration above) is well-formed.
template <typename T>
struct IsSwappable : decltype(swappable_impl::IsSwappableImpl::Check<T&>(0)) {};

// Forward compatibility for C++20 (std::remove_cvref_t).
template <typename T>
using RemoveCvRefT = std::remove_cv_t<std::remove_reference_t<T>>;
+
+} // namespace internal
+
+// On Windows, by default, empty-base class optimization does not work,
+// which means even if the base class is empty struct, it still consumes one
+// byte for its body. __declspec(empty_bases) enables the optimization.
+// cf)
+// https://blogs.msdn.microsoft.com/vcblog/2016/03/30/optimizing-the-layout-of-empty-base-classes-in-vs2015-update-2-3/
+#ifdef OS_WIN
+#define OPTIONAL_DECLSPEC_EMPTY_BASES __declspec(empty_bases)
+#else
+#define OPTIONAL_DECLSPEC_EMPTY_BASES
+#endif
+
+// base::Optional is a Chromium version of the C++17 optional class:
+// std::optional documentation:
+// http://en.cppreference.com/w/cpp/utility/optional
+// Chromium documentation:
+// https://chromium.googlesource.com/chromium/src/+/master/docs/optional.md
+//
+// These are the differences between the specification and the implementation:
+// - Constructors do not use 'constexpr' as it is a C++14 extension.
+// - 'constexpr' might be missing in some places for reasons specified locally.
+// - No exceptions are thrown, because they are banned from Chromium.
+// Marked noexcept for only move constructor and move assign operators.
+// - All the non-members are in the 'base' namespace instead of 'std'.
+//
+// Note that T cannot have a constructor T(Optional<T>) etc. Optional<T> checks
+// T's constructor (specifically via IsConvertibleFromOptional), and in the
+// check whether T can be constructible from Optional<T>, which is recursive
+// so it does not work. As of Feb 2018, std::optional C++17 implementation in
+// both clang and gcc has same limitation. MSVC SFINAE looks to have different
+// behavior, but anyway it reports an error, too.
template <typename T>
class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
    : public internal::OptionalBase<T>,
      public internal::CopyConstructible<std::is_copy_constructible<T>::value>,
      public internal::MoveConstructible<std::is_move_constructible<T>::value>,
      public internal::CopyAssignable<std::is_copy_constructible<T>::value &&
                                      std::is_copy_assignable<T>::value>,
      public internal::MoveAssignable<std::is_move_constructible<T>::value &&
                                      std::is_move_assignable<T>::value> {
 private:
  // Disable some versions of T that are ill-formed.
  // See: https://timsong-cpp.github.io/cppwp/n4659/optional#syn-1
  static_assert(
      !std::is_same<internal::RemoveCvRefT<T>, in_place_t>::value,
      "instantiation of base::Optional with in_place_t is ill-formed");
  static_assert(!std::is_same<internal::RemoveCvRefT<T>, nullopt_t>::value,
                "instantiation of base::Optional with nullopt_t is ill-formed");
  static_assert(
      !std::is_reference<T>::value,
      "instantiation of base::Optional with a reference type is ill-formed");
  // See: https://timsong-cpp.github.io/cppwp/n4659/optional#optional-3
  static_assert(std::is_destructible<T>::value,
                "instantiation of base::Optional with a non-destructible type "
                "is ill-formed");
  // Arrays are explicitly disallowed because for arrays of known bound
  // is_destructible is of undefined value.
  // See: https://en.cppreference.com/w/cpp/types/is_destructible
  static_assert(
      !std::is_array<T>::value,
      "instantiation of base::Optional with an array type is ill-formed");

 public:
  // The macro was only needed to annotate the class head above; undefining it
  // inside the class body is safe because the expansion already happened.
#undef OPTIONAL_DECLSPEC_EMPTY_BASES
  using value_type = T;

  // Defer default/copy/move constructor implementation to OptionalBase.
  constexpr Optional() = default;
  constexpr Optional(const Optional& other) = default;
  constexpr Optional(Optional&& other) noexcept(
      std::is_nothrow_move_constructible<T>::value) = default;

  // Constructs an empty Optional; intentionally implicit so that callers can
  // return/pass base::nullopt directly.
  constexpr Optional(nullopt_t) {}  // NOLINT(runtime/explicit)

  // Converting copy constructor. "explicit" only if
  // std::is_convertible<const U&, T>::value is false. It is implemented by
  // declaring two almost same constructors, but that condition in enable_if_t
  // is different, so that either one is chosen, thanks to SFINAE.
  template <
      typename U,
      std::enable_if_t<std::is_constructible<T, const U&>::value &&
                           !internal::IsConvertibleFromOptional<T, U>::value &&
                           std::is_convertible<const U&, T>::value,
                       bool> = false>
  Optional(const Optional<U>& other) : internal::OptionalBase<T>(other) {}

  template <
      typename U,
      std::enable_if_t<std::is_constructible<T, const U&>::value &&
                           !internal::IsConvertibleFromOptional<T, U>::value &&
                           !std::is_convertible<const U&, T>::value,
                       bool> = false>
  explicit Optional(const Optional<U>& other)
      : internal::OptionalBase<T>(other) {}

  // Converting move constructor. Similar to converting copy constructor,
  // declaring two (explicit and non-explicit) constructors.
  template <
      typename U,
      std::enable_if_t<std::is_constructible<T, U&&>::value &&
                           !internal::IsConvertibleFromOptional<T, U>::value &&
                           std::is_convertible<U&&, T>::value,
                       bool> = false>
  Optional(Optional<U>&& other) : internal::OptionalBase<T>(std::move(other)) {}

  template <
      typename U,
      std::enable_if_t<std::is_constructible<T, U&&>::value &&
                           !internal::IsConvertibleFromOptional<T, U>::value &&
                           !std::is_convertible<U&&, T>::value,
                       bool> = false>
  explicit Optional(Optional<U>&& other)
      : internal::OptionalBase<T>(std::move(other)) {}

  // In-place construction of the contained value from |args|.
  template <class... Args>
  constexpr explicit Optional(in_place_t, Args&&... args)
      : internal::OptionalBase<T>(in_place, std::forward<Args>(args)...) {}

  // In-place construction from an initializer list plus extra arguments;
  // SFINAE'd out unless T is constructible from that combination.
  template <
      class U,
      class... Args,
      class = std::enable_if_t<std::is_constructible<value_type,
                                                     std::initializer_list<U>&,
                                                     Args...>::value>>
  constexpr explicit Optional(in_place_t,
                              std::initializer_list<U> il,
                              Args&&... args)
      : internal::OptionalBase<T>(in_place, il, std::forward<Args>(args)...) {}

  // Forward value constructor. Similar to converting constructors,
  // conditionally explicit.
  template <
      typename U = value_type,
      std::enable_if_t<
          std::is_constructible<T, U&&>::value &&
              !std::is_same<internal::RemoveCvRefT<U>, in_place_t>::value &&
              !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
              std::is_convertible<U&&, T>::value,
          bool> = false>
  constexpr Optional(U&& value)
      : internal::OptionalBase<T>(in_place, std::forward<U>(value)) {}

  template <
      typename U = value_type,
      std::enable_if_t<
          std::is_constructible<T, U&&>::value &&
              !std::is_same<internal::RemoveCvRefT<U>, in_place_t>::value &&
              !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
              !std::is_convertible<U&&, T>::value,
          bool> = false>
  constexpr explicit Optional(U&& value)
      : internal::OptionalBase<T>(in_place, std::forward<U>(value)) {}

  ~Optional() = default;

  // Defer copy-/move- assign operator implementation to OptionalBase.
  Optional& operator=(const Optional& other) = default;
  Optional& operator=(Optional&& other) noexcept(
      std::is_nothrow_move_assignable<T>::value&&
          std::is_nothrow_move_constructible<T>::value) = default;

  // Assigning nullopt destroys the contained value (if any), leaving the
  // Optional empty.
  Optional& operator=(nullopt_t) {
    FreeIfNeeded();
    return *this;
  }

  // Perfect-forwarded assignment.
  template <typename U>
  std::enable_if_t<
      !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
          std::is_constructible<T, U>::value &&
          std::is_assignable<T&, U>::value &&
          (!std::is_scalar<T>::value ||
           !std::is_same<std::decay_t<U>, T>::value),
      Optional&>
  operator=(U&& value) {
    InitOrAssign(std::forward<U>(value));
    return *this;
  }

  // Copy assign the state of other.
  template <typename U>
  std::enable_if_t<!internal::IsAssignableFromOptional<T, U>::value &&
                       std::is_constructible<T, const U&>::value &&
                       std::is_assignable<T&, const U&>::value,
                   Optional&>
  operator=(const Optional<U>& other) {
    CopyAssign(other);
    return *this;
  }

  // Move assign the state of other.
  template <typename U>
  std::enable_if_t<!internal::IsAssignableFromOptional<T, U>::value &&
                       std::is_constructible<T, U>::value &&
                       std::is_assignable<T&, U>::value,
                   Optional&>
  operator=(Optional<U>&& other) {
    MoveAssign(std::move(other));
    return *this;
  }

  // Unlike std::optional, dereferencing an empty Optional is a CHECK
  // failure rather than undefined behavior.
  constexpr const T* operator->() const {
    CHECK(storage_.is_populated_);
    return &storage_.value_;
  }

  constexpr T* operator->() {
    CHECK(storage_.is_populated_);
    return &storage_.value_;
  }

  constexpr const T& operator*() const & {
    CHECK(storage_.is_populated_);
    return storage_.value_;
  }

  constexpr T& operator*() & {
    CHECK(storage_.is_populated_);
    return storage_.value_;
  }

  constexpr const T&& operator*() const && {
    CHECK(storage_.is_populated_);
    return std::move(storage_.value_);
  }

  constexpr T&& operator*() && {
    CHECK(storage_.is_populated_);
    return std::move(storage_.value_);
  }

  // True iff a value is currently held.
  constexpr explicit operator bool() const { return storage_.is_populated_; }

  constexpr bool has_value() const { return storage_.is_populated_; }

  // value() also CHECKs instead of throwing std::bad_optional_access,
  // since exceptions are banned in Chromium.
  constexpr T& value() & {
    CHECK(storage_.is_populated_);
    return storage_.value_;
  }

  constexpr const T& value() const & {
    CHECK(storage_.is_populated_);
    return storage_.value_;
  }

  constexpr T&& value() && {
    CHECK(storage_.is_populated_);
    return std::move(storage_.value_);
  }

  constexpr const T&& value() const && {
    CHECK(storage_.is_populated_);
    return std::move(storage_.value_);
  }

  // Returns the contained value, or |default_value| converted to T when
  // empty.
  template <class U>
  constexpr T value_or(U&& default_value) const& {
    // TODO(mlamouri): add the following assert when possible:
    // static_assert(std::is_copy_constructible<T>::value,
    //               "T must be copy constructible");
    static_assert(std::is_convertible<U, T>::value,
                  "U must be convertible to T");
    return storage_.is_populated_
               ? storage_.value_
               : static_cast<T>(std::forward<U>(default_value));
  }

  template <class U>
  constexpr T value_or(U&& default_value) && {
    // TODO(mlamouri): add the following assert when possible:
    // static_assert(std::is_move_constructible<T>::value,
    //               "T must be move constructible");
    static_assert(std::is_convertible<U, T>::value,
                  "U must be convertible to T");
    return storage_.is_populated_
               ? std::move(storage_.value_)
               : static_cast<T>(std::forward<U>(default_value));
  }

  // Exchanges the states of *this and |other|. Three cases: both empty
  // (no-op), exactly one engaged (move the value across and empty the
  // source), or both engaged (ADL swap of the contained values).
  void swap(Optional& other) {
    if (!storage_.is_populated_ && !other.storage_.is_populated_)
      return;

    if (storage_.is_populated_ != other.storage_.is_populated_) {
      if (storage_.is_populated_) {
        other.storage_.Init(std::move(storage_.value_));
        FreeIfNeeded();
      } else {
        storage_.Init(std::move(other.storage_.value_));
        other.FreeIfNeeded();
      }
      return;
    }

    DCHECK(storage_.is_populated_ && other.storage_.is_populated_);
    using std::swap;
    swap(**this, *other);
  }

  // Destroys the contained value (if any), leaving the Optional empty.
  void reset() { FreeIfNeeded(); }

  // Destroys any existing value, then constructs a new one in place from
  // |args|. Returns a reference to the new value.
  template <class... Args>
  T& emplace(Args&&... args) {
    FreeIfNeeded();
    storage_.Init(std::forward<Args>(args)...);
    return storage_.value_;
  }

  template <class U, class... Args>
  std::enable_if_t<
      std::is_constructible<T, std::initializer_list<U>&, Args&&...>::value,
      T&>
  emplace(std::initializer_list<U> il, Args&&... args) {
    FreeIfNeeded();
    storage_.Init(il, std::forward<Args>(args)...);
    return storage_.value_;
  }

 private:
  // Accessing template base class's protected member needs explicit
  // declaration to do so.
  using internal::OptionalBase<T>::CopyAssign;
  using internal::OptionalBase<T>::FreeIfNeeded;
  using internal::OptionalBase<T>::InitOrAssign;
  using internal::OptionalBase<T>::MoveAssign;
  using internal::OptionalBase<T>::storage_;
};
+
// The comparison operators below follow
// http://en.cppreference.com/w/cpp/utility/optional/operator_cmp
// while bool() casting is replaced by has_value() to meet the chromium
// style guide.
+template <class T, class U>
+constexpr bool operator==(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (lhs.has_value() != rhs.has_value())
+ return false;
+ if (!lhs.has_value())
+ return true;
+ return *lhs == *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator!=(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (lhs.has_value() != rhs.has_value())
+ return true;
+ if (!lhs.has_value())
+ return false;
+ return *lhs != *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator<(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!rhs.has_value())
+ return false;
+ if (!lhs.has_value())
+ return true;
+ return *lhs < *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator<=(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!lhs.has_value())
+ return true;
+ if (!rhs.has_value())
+ return false;
+ return *lhs <= *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator>(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!lhs.has_value())
+ return false;
+ if (!rhs.has_value())
+ return true;
+ return *lhs > *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator>=(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!rhs.has_value())
+ return true;
+ if (!lhs.has_value())
+ return false;
+ return *lhs >= *rhs;
+}
+
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, nullopt_t) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator==(nullopt_t, const Optional<T>& opt) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& opt, nullopt_t) {
+ return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator!=(nullopt_t, const Optional<T>& opt) {
+ return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& opt, nullopt_t) {
+ return false;
+}
+
+template <class T>
+constexpr bool operator<(nullopt_t, const Optional<T>& opt) {
+ return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& opt, nullopt_t) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator<=(nullopt_t, const Optional<T>& opt) {
+ return true;
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& opt, nullopt_t) {
+ return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator>(nullopt_t, const Optional<T>& opt) {
+ return false;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& opt, nullopt_t) {
+ return true;
+}
+
+template <class T>
+constexpr bool operator>=(nullopt_t, const Optional<T>& opt) {
+ return !opt;
+}
+
+template <class T, class U>
+constexpr bool operator==(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt == value : false;
+}
+
+template <class T, class U>
+constexpr bool operator==(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value == *opt : false;
+}
+
+template <class T, class U>
+constexpr bool operator!=(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt != value : true;
+}
+
+template <class T, class U>
+constexpr bool operator!=(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value != *opt : true;
+}
+
+template <class T, class U>
+constexpr bool operator<(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt < value : true;
+}
+
+template <class T, class U>
+constexpr bool operator<(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value < *opt : false;
+}
+
+template <class T, class U>
+constexpr bool operator<=(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt <= value : true;
+}
+
+template <class T, class U>
+constexpr bool operator<=(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value <= *opt : false;
+}
+
+template <class T, class U>
+constexpr bool operator>(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt > value : false;
+}
+
+template <class T, class U>
+constexpr bool operator>(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value > *opt : true;
+}
+
+template <class T, class U>
+constexpr bool operator>=(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt >= value : false;
+}
+
+template <class T, class U>
+constexpr bool operator>=(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value >= *opt : true;
+}
+
// Factory helpers mirroring std::make_optional.

// Wraps |value| (decayed) in an engaged Optional.
template <class T>
constexpr Optional<std::decay_t<T>> make_optional(T&& value) {
  return Optional<std::decay_t<T>>(std::forward<T>(value));
}

// Constructs the contained T in place from |args|.
template <class T, class... Args>
constexpr Optional<T> make_optional(Args&&... args) {
  return Optional<T>(in_place, std::forward<Args>(args)...);
}

// In-place construction from an initializer list plus extra arguments.
template <class T, class U, class... Args>
constexpr Optional<T> make_optional(std::initializer_list<U> il,
                                    Args&&... args) {
  return Optional<T>(in_place, il, std::forward<Args>(args)...);
}
+
+// Partial specialization for a function template is not allowed. Also, it is
+// not allowed to add overload function to std namespace, while it is allowed
+// to specialize the template in std. Thus, swap() (kind of) overloading is
+// defined in base namespace, instead.
// Enabled only when T is both move-constructible and swappable, matching
// the std::swap(std::optional) specification.
template <class T>
std::enable_if_t<std::is_move_constructible<T>::value &&
                 internal::IsSwappable<T>::value>
swap(Optional<T>& lhs, Optional<T>& rhs) {
  // Delegates to the member swap, which handles engaged/empty mixes.
  lhs.swap(rhs);
}
+
+} // namespace base
+
+namespace std {
+
+template <class T>
+struct hash<base::Optional<T>> {
+ size_t operator()(const base::Optional<T>& opt) const {
+ return opt == base::nullopt ? 0 : std::hash<T>()(*opt);
+ }
+};
+
+} // namespace std
+
+#endif // BASE_OPTIONAL_H_
diff --git a/security/sandbox/chromium/base/os_compat_android.h b/security/sandbox/chromium/base/os_compat_android.h
new file mode 100644
index 0000000000..e33b1f7ac3
--- /dev/null
+++ b/security/sandbox/chromium/base/os_compat_android.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OS_COMPAT_ANDROID_H_
+#define BASE_OS_COMPAT_ANDROID_H_
+
+#include <fcntl.h>
+#include <sys/types.h>
+#include <utime.h>
+
// Declarations for libc functions Bionic lacks.
// NOTE(review): declarations only — the matching definitions are assumed
// to be provided elsewhere in the tree for Android builds; confirm.

// Not implemented in Bionic.
extern "C" int futimes(int fd, const struct timeval tv[2]);

// Not exposed or implemented in Bionic.
extern "C" char* mkdtemp(char* path);

// Android has no timegm().
extern "C" time_t timegm(struct tm* const t);
+
+#endif // BASE_OS_COMPAT_ANDROID_H_
diff --git a/security/sandbox/chromium/base/path_service.h b/security/sandbox/chromium/base/path_service.h
new file mode 100644
index 0000000000..9b4715f073
--- /dev/null
+++ b/security/sandbox/chromium/base/path_service.h
@@ -0,0 +1,94 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PATH_SERVICE_H_
+#define BASE_PATH_SERVICE_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/base_paths.h"
+#include "base/gtest_prod_util.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class FilePath;
+class ScopedPathOverride;
+
+// The path service is a global table mapping keys to file system paths. It is
+// OK to use this service from multiple threads.
+//
class BASE_EXPORT PathService {
 public:
  // Retrieves a path to a special directory or file and places it into the
  // string pointed to by 'path'. If you ask for a directory it is guaranteed
  // to NOT have a path separator at the end. For example, "c:\windows\temp"
  // Directories are also guaranteed to exist when this function succeeds.
  //
  // Returns true if the directory or file was successfully retrieved. On
  // failure, 'path' will not be changed.
  static bool Get(int key, FilePath* path);

  // Overrides the path to a special directory or file. This cannot be used to
  // change the value of DIR_CURRENT, but that should be obvious. Also, if the
  // path specifies a directory that does not exist, the directory will be
  // created by this method. This method returns true if successful.
  //
  // If the given path is relative, then it will be resolved against
  // DIR_CURRENT.
  //
  // WARNING: Consumers of PathService::Get may expect paths to be constant
  // over the lifetime of the app, so this method should be used with caution.
  //
  // Unit tests generally should use ScopedPathOverride instead. Overrides from
  // one test should not carry over to another.
  static bool Override(int key, const FilePath& path);

  // This function does the same as PathService::Override but it takes extra
  // parameters:
  // - |is_absolute| indicates that |path| has already been expanded into an
  // absolute path, otherwise MakeAbsoluteFilePath() will be used. This is
  // useful to override paths that may not exist yet, since MakeAbsoluteFilePath
  // fails for those. Note that MakeAbsoluteFilePath also expands symbolic
  // links, even if path.IsAbsolute() is already true.
  // - |create| guides whether the directory to be overridden must
  // be created in case it doesn't exist already.
  static bool OverrideAndCreateIfNeeded(int key,
                                        const FilePath& path,
                                        bool is_absolute,
                                        bool create);

  // To extend the set of supported keys, you can register a path provider,
  // which is just a function mirroring PathService::Get. The ProviderFunc
  // returns false if it cannot provide a non-empty path for the given key.
  // Otherwise, true is returned.
  //
  // WARNING: This function could be called on any thread from which the
  // PathService is used, so the ProviderFunc MUST BE THREADSAFE.
  //
  typedef bool (*ProviderFunc)(int, FilePath*);

  // Call to register a path provider. You must specify the range "[key_start,
  // key_end)" of supported path keys.
  static void RegisterProvider(ProviderFunc provider,
                               int key_start,
                               int key_end);

  // Disable internal cache.
  static void DisableCache();

 private:
  friend class ScopedPathOverride;
  FRIEND_TEST_ALL_PREFIXES(PathServiceTest, RemoveOverride);

  // Removes an override for a special directory or file. Returns true if there
  // was an override to remove or false if none was present.
  // NOTE: This function is intended to be used by tests only!
  static bool RemoveOverride(int key);
};
+
+} // namespace base
+
+#endif // BASE_PATH_SERVICE_H_
diff --git a/security/sandbox/chromium/base/posix/can_lower_nice_to.cc b/security/sandbox/chromium/base/posix/can_lower_nice_to.cc
new file mode 100644
index 0000000000..b1686dcae1
--- /dev/null
+++ b/security/sandbox/chromium/base/posix/can_lower_nice_to.cc
@@ -0,0 +1,60 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/posix/can_lower_nice_to.h"
+
+#include <limits.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "build/build_config.h"
+
+// Not defined on AIX by default.
+#if defined(OS_AIX)
+#if defined(RLIMIT_NICE)
+#error Assumption about OS_AIX is incorrect
+#endif
+#define RLIMIT_NICE 20
+#endif
+
+namespace base {
+namespace internal {
+
// Returns whether lowering the nice value (i.e. raising the priority) of a
// process or thread to |nice_value| should succeed.
bool CanLowerNiceTo(int nice_value) {
  // Lowering a nice value is permitted when one of the following holds:
  //   1. The caller is root.
  //   2. The caller holds CAP_SYS_NICE (not checked here: testing for it
  //      would require linking against libcap.so).
  //   3. The target value is within the range granted by RLIMIT_NICE.

  if (geteuid() == 0)
    return true;

  // <limits.h> defines NZERO per POSIX (at least 20, typically exactly 20);
  // NZERO - 1 is the weakest possible priority. RLIMIT_NICE expresses how
  // far below NZERO the niceness may be pushed when starting from NZERO:
  // with NZERO == 20 and a limit of 30, values down to 20 - 30 == -10 are
  // permitted.
  struct rlimit limits;
  if (getrlimit(RLIMIT_NICE, &limits) != 0)
    return false;
  const int lowest_allowed = NZERO - static_cast<int>(limits.rlim_cur);

  // Lowering to |nice_value| succeeds iff it does not go below that floor.
  return nice_value >= lowest_allowed;
}
+
+} // namespace internal
+} // namespace base
diff --git a/security/sandbox/chromium/base/posix/can_lower_nice_to.h b/security/sandbox/chromium/base/posix/can_lower_nice_to.h
new file mode 100644
index 0000000000..aa8f02e9f7
--- /dev/null
+++ b/security/sandbox/chromium/base/posix/can_lower_nice_to.h
@@ -0,0 +1,19 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POSIX_CAN_LOWER_NICE_TO_H_
+#define BASE_POSIX_CAN_LOWER_NICE_TO_H_
+
+namespace base {
+namespace internal {
+
+// Returns true if lowering the nice value of a process or thread to
+// |nice_value| using setpriority() or nice() should succeed. Note: A lower nice
+// value means a higher priority.
+bool CanLowerNiceTo(int nice_value);
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_POSIX_CAN_LOWER_NICE_TO_H_
diff --git a/security/sandbox/chromium/base/posix/eintr_wrapper.h b/security/sandbox/chromium/base/posix/eintr_wrapper.h
new file mode 100644
index 0000000000..0e6e437953
--- /dev/null
+++ b/security/sandbox/chromium/base/posix/eintr_wrapper.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This provides a wrapper around system calls which may be interrupted by a
+// signal and return EINTR. See man 7 signal.
// To prevent long-lasting loops (which would likely be a bug, such as a signal
// that should be masked) from going unnoticed, there is a limit after which
// the caller will nonetheless see an EINTR in Debug builds.
+//
+// On Windows and Fuchsia, this wrapper macro does nothing because there are no
+// signals.
+//
+// Don't wrap close calls in HANDLE_EINTR. Use IGNORE_EINTR if the return
+// value of close is significant. See http://crbug.com/269623.
+
+#ifndef BASE_POSIX_EINTR_WRAPPER_H_
+#define BASE_POSIX_EINTR_WRAPPER_H_
+
+#include "build/build_config.h"
+
#if defined(OS_POSIX)

#include <errno.h>

#if defined(NDEBUG)

// Release builds: re-evaluate |x| for as long as it fails with -1/EINTR.
// The GCC/clang statement expression yields the final result of |x|.
#define HANDLE_EINTR(x) ({ \
  decltype(x) eintr_wrapper_result; \
  do { \
    eintr_wrapper_result = (x); \
  } while (eintr_wrapper_result == -1 && errno == EINTR); \
  eintr_wrapper_result; \
})

#else

// Debug builds: same retry loop, but capped at 100 attempts so that a
// signal that should have been masked still surfaces as EINTR.
#define HANDLE_EINTR(x) ({ \
  int eintr_wrapper_counter = 0; \
  decltype(x) eintr_wrapper_result; \
  do { \
    eintr_wrapper_result = (x); \
  } while (eintr_wrapper_result == -1 && errno == EINTR && \
           eintr_wrapper_counter++ < 100); \
  eintr_wrapper_result; \
})

#endif  // NDEBUG

// Evaluates |x| exactly once; a -1/EINTR failure is rewritten to 0
// (success). Intended for close(), where retrying is unsafe — see
// http://crbug.com/269623 in the header comment above.
#define IGNORE_EINTR(x) ({ \
  decltype(x) eintr_wrapper_result; \
  do { \
    eintr_wrapper_result = (x); \
    if (eintr_wrapper_result == -1 && errno == EINTR) { \
      eintr_wrapper_result = 0; \
    } \
  } while (0); \
  eintr_wrapper_result; \
})

#else  // !OS_POSIX

// No signals on Windows/Fuchsia: both wrappers are pass-throughs.
#define HANDLE_EINTR(x) (x)
#define IGNORE_EINTR(x) (x)

#endif  // !OS_POSIX
+
+#endif // BASE_POSIX_EINTR_WRAPPER_H_
diff --git a/security/sandbox/chromium/base/posix/safe_strerror.cc b/security/sandbox/chromium/base/posix/safe_strerror.cc
new file mode 100644
index 0000000000..aef5742d33
--- /dev/null
+++ b/security/sandbox/chromium/base/posix/safe_strerror.cc
@@ -0,0 +1,128 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(__ANDROID__)
+// Post-L versions of bionic define the GNU-specific strerror_r if _GNU_SOURCE
+// is defined, but the symbol is renamed to __gnu_strerror_r which only exists
+// on those later versions. To preserve ABI compatibility with older versions,
+// undefine _GNU_SOURCE and use the POSIX version.
+#undef _GNU_SOURCE
+#endif
+
+#include "base/posix/safe_strerror.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "build/build_config.h"
+
+namespace base {
+
+#if defined(__GLIBC__) || defined(OS_NACL)
+#define USE_HISTORICAL_STRERRO_R 1
+#else
+#define USE_HISTORICAL_STRERRO_R 0
+#endif
+
+#if USE_HISTORICAL_STRERRO_R && defined(__GNUC__)
+// GCC will complain about the unused second wrap function unless we tell it
+// that we meant for them to be potentially unused, which is exactly what this
+// attribute is for.
+#define POSSIBLY_UNUSED __attribute__((unused))
+#else
+#define POSSIBLY_UNUSED
+#endif
+
+#if USE_HISTORICAL_STRERRO_R
+// glibc has two strerror_r functions: a historical GNU-specific one that
+// returns type char *, and a POSIX.1-2001 compliant one available since 2.3.4
+// that returns int. This wraps the GNU-specific one.
// Wraps the GNU-specific strerror_r (char* return): writes the message for
// |err| into |buf| of size |len|, always null-terminated.
static void POSSIBLY_UNUSED wrap_posix_strerror_r(
    char *(*strerror_r_ptr)(int, char *, size_t),
    int err,
    char *buf,
    size_t len) {
  // GNU version.
  char *rc = (*strerror_r_ptr)(err, buf, len);
  if (rc != buf) {
    // glibc did not use buf and returned a static string instead. Copy it
    // into buf.
    buf[0] = '\0';
    // NOTE(review): callers guarantee len > 0 (see safe_strerror_r's guard),
    // so len - 1 cannot underflow here.
    strncat(buf, rc, len - 1);
  }
  // The GNU version never fails. Unknown errors get an "unknown error" message.
  // The result is always null terminated.
}
+#endif // USE_HISTORICAL_STRERRO_R
+
+// Wrapper for strerror_r functions that implement the POSIX interface. POSIX
+// does not define the behaviour for some of the edge cases, so we wrap it to
+// guarantee that they are handled. This is compiled on all POSIX platforms, but
+// it will only be used on Linux if the POSIX strerror_r implementation is
+// being used (see below).
// Wraps the POSIX strerror_r (int return): writes the message for |err|
// into |buf| of size |len|, guaranteeing null termination and leaving
// errno unchanged even across the edge cases POSIX leaves unspecified.
static void POSSIBLY_UNUSED wrap_posix_strerror_r(
    int (*strerror_r_ptr)(int, char *, size_t),
    int err,
    char *buf,
    size_t len) {
  // Saved so the caller never observes an errno change from this function.
  int old_errno = errno;
  // Have to cast since otherwise we get an error if this is the GNU version
  // (but in such a scenario this function is never called). Sadly we can't use
  // C++-style casts because the appropriate one is reinterpret_cast but it's
  // considered illegal to reinterpret_cast a type to itself, so we get an
  // error in the opposite case.
  int result = (*strerror_r_ptr)(err, buf, len);
  if (result == 0) {
    // POSIX is vague about whether the string will be terminated, although
    // it indirectly implies that typically ERANGE will be returned, instead
    // of truncating the string. We play it safe by always terminating the
    // string explicitly.
    buf[len - 1] = '\0';
  } else {
    // Error. POSIX is vague about whether the return value is itself a system
    // error code or something else. On Linux currently it is -1 and errno is
    // set. On BSD-derived systems it is a system error and errno is unchanged.
    // We try and detect which case it is so as to put as much useful info as
    // we can into our message.
    int strerror_error;  // The error encountered in strerror
    int new_errno = errno;
    if (new_errno != old_errno) {
      // errno was changed, so probably the return value is just -1 or something
      // else that doesn't provide any info, and errno is the error.
      strerror_error = new_errno;
    } else {
      // Either the error from strerror_r was the same as the previous value, or
      // errno wasn't used. Assume the latter.
      strerror_error = result;
    }
    // snprintf truncates and always null-terminates.
    snprintf(buf,
             len,
             "Error %d while retrieving error %d",
             strerror_error,
             err);
  }
  errno = old_errno;
}
+
+void safe_strerror_r(int err, char *buf, size_t len) {
+ if (buf == nullptr || len <= 0) {
+ return;
+ }
+ // If using glibc (i.e., Linux), the compiler will automatically select the
+ // appropriate overloaded function based on the function type of strerror_r.
+ // The other one will be elided from the translation unit since both are
+ // static.
+ wrap_posix_strerror_r(&strerror_r, err, buf, len);
+}
+
// Returns the message for |err| as a std::string; thread-safe replacement
// for strerror().
std::string safe_strerror(int err) {
  // 256 bytes comfortably fits any strerror message plus the fallback
  // "Error %d while retrieving error %d" text.
  const int buffer_size = 256;
  char buf[buffer_size];
  safe_strerror_r(err, buf, sizeof(buf));
  return std::string(buf);
}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/posix/safe_strerror.h b/security/sandbox/chromium/base/posix/safe_strerror.h
new file mode 100644
index 0000000000..2945312910
--- /dev/null
+++ b/security/sandbox/chromium/base/posix/safe_strerror.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POSIX_SAFE_STRERROR_H_
+#define BASE_POSIX_SAFE_STRERROR_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// BEFORE using anything from this file, first look at PLOG and friends in
+// logging.h and use them instead if applicable.
+//
+// This file declares safe, portable alternatives to the POSIX strerror()
+// function. strerror() is inherently unsafe in multi-threaded apps and should
+// never be used. Doing so can cause crashes. Additionally, the thread-safe
+// alternative strerror_r varies in semantics across platforms. Use these
+// functions instead.
+
+// Thread-safe strerror function with dependable semantics that never fails.
+// It will write the string form of error "err" to buffer buf of length len.
+// If there is an error calling the OS's strerror_r() function then a message to
+// that effect will be printed into buf, truncating if necessary. The final
+// result is always null-terminated. The value of errno is never changed.
+//
+// Use this instead of strerror_r().
+BASE_EXPORT void safe_strerror_r(int err, char *buf, size_t len);
+
+// Calls safe_strerror_r with a buffer of suitable size and returns the result
+// in a C++ string.
+//
+// Use this instead of strerror(). Note though that safe_strerror_r will be
+// more robust in the case of heap corruption errors, since it doesn't need to
+// allocate a string.
+BASE_EXPORT std::string safe_strerror(int err);
+
+} // namespace base
+
+#endif // BASE_POSIX_SAFE_STRERROR_H_
diff --git a/security/sandbox/chromium/base/process/environment_internal.cc b/security/sandbox/chromium/base/process/environment_internal.cc
new file mode 100644
index 0000000000..357140fa6f
--- /dev/null
+++ b/security/sandbox/chromium/base/process/environment_internal.cc
@@ -0,0 +1,128 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/environment_internal.h"
+
+#include <stddef.h>
+
+#include <vector>
+
+namespace base {
+namespace internal {
+
+namespace {
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA) || defined(OS_WIN)
+// Parses a null-terminated input string of an environment block. The key is
+// placed into the given string, and the total length of the line, including
+// the terminating null, is returned.
+size_t ParseEnvLine(const NativeEnvironmentString::value_type* input,
+ NativeEnvironmentString* key) {
+ // Scan up to the '=' separator (or end of string); that prefix is the key.
+ size_t cur = 0;
+ while (input[cur] && input[cur] != '=')
+ cur++;
+ *key = NativeEnvironmentString(&input[0], cur);
+
+ // Advance past the value to the line's terminating null.
+ while (input[cur])
+ cur++;
+ return cur + 1; // +1 so the returned length includes the terminating null.
+}
+#endif
+
+} // namespace
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+std::unique_ptr<char* []> AlterEnvironment(const char* const* const env,
+ const EnvironmentMap& changes) {
+ std::string value_storage; // Holds concatenated null-terminated strings.
+ std::vector<size_t> result_indices; // Line indices into value_storage.
+
+ // First build up all of the unchanged environment strings. These are
+ // null-terminated of the form "key=value".
+ std::string key;
+ for (size_t i = 0; env[i]; i++) {
+ size_t line_length = ParseEnvLine(env[i], &key);
+
+ // Keep only lines whose key is not mentioned in |changes|.
+ auto found_change = changes.find(key);
+ if (found_change == changes.end()) {
+ result_indices.push_back(value_storage.size());
+ value_storage.append(env[i], line_length); // Includes the trailing null.
+ }
+ }
+
+ // Now append all modified and new values.
+ for (const auto& i : changes) {
+ if (!i.second.empty()) { // An empty value means "remove this key".
+ result_indices.push_back(value_storage.size());
+ value_storage.append(i.first);
+ value_storage.push_back('=');
+ value_storage.append(i.second);
+ value_storage.push_back(0);
+ }
+ }
+
+ size_t pointer_count_required =
+ result_indices.size() + 1 + // Null-terminated array of pointers.
+ (value_storage.size() + sizeof(char*) - 1) / sizeof(char*); // Buffer.
+ std::unique_ptr<char*[]> result(new char*[pointer_count_required]);
+
+ // The string storage goes after the array of pointers, in the same
+ // allocation, so callers manage a single pointer (see the header comment).
+ char* storage_data =
+ reinterpret_cast<char*>(&result.get()[result_indices.size() + 1]);
+ if (!value_storage.empty())
+ memcpy(storage_data, value_storage.data(), value_storage.size());
+
+ // Fill array of pointers at the beginning of the result.
+ for (size_t i = 0; i < result_indices.size(); i++)
+ result[i] = &storage_data[result_indices[i]];
+ result[result_indices.size()] = 0; // Null terminator.
+
+ return result;
+}
+
+#elif defined(OS_WIN)
+
+NativeEnvironmentString AlterEnvironment(const wchar_t* env,
+ const EnvironmentMap& changes) {
+ NativeEnvironmentString result;
+
+ // First build up all of the unchanged environment strings.
+ const wchar_t* ptr = env;
+ while (*ptr) {
+ std::wstring key;
+ size_t line_length = ParseEnvLine(ptr, &key);
+
+ // Keep only lines whose key is not mentioned in |changes|.
+ if (changes.find(key) == changes.end()) {
+ result.append(ptr, line_length); // Includes the trailing null.
+ }
+ ptr += line_length;
+ }
+
+ // Now append all modified and new values.
+ for (const auto& i : changes) {
+ // Windows environment blocks cannot handle keys or values with NULs.
+ CHECK_EQ(std::wstring::npos, i.first.find(L'\0'));
+ CHECK_EQ(std::wstring::npos, i.second.find(L'\0'));
+ if (!i.second.empty()) { // An empty value means "remove this key".
+ result += i.first;
+ result.push_back('=');
+ result += i.second;
+ result.push_back('\0');
+ }
+ }
+
+ // Add the terminating NUL, giving the doubly-null-terminated block format.
+ result.push_back('\0');
+ return result;
+}
+
+#endif // OS_POSIX || OS_FUCHSIA || OS_WIN
+
+} // namespace internal
+} // namespace base
diff --git a/security/sandbox/chromium/base/process/environment_internal.h b/security/sandbox/chromium/base/process/environment_internal.h
new file mode 100644
index 0000000000..31338f1320
--- /dev/null
+++ b/security/sandbox/chromium/base/process/environment_internal.h
@@ -0,0 +1,52 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains internal routines that are called by other files in
+// base/process/.
+
+#ifndef BASE_PROCESS_ENVIRONMENT_INTERNAL_H_
+#define BASE_PROCESS_ENVIRONMENT_INTERNAL_H_
+
+#include <memory>
+
+#include "base/environment.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+// Returns a modified environment vector constructed from the given environment
+// and the list of changes given in |changes|. Each key in the environment is
+// matched against the first element of the pairs. In the event of a match, the
+// value is replaced by the second of the pair, unless the second is empty, in
+// which case the key-value is removed.
+//
+// This POSIX version takes and returns a POSIX-style environment block, which
+// is a null-terminated list of pointers to null-terminated strings. The
+// returned array will have appended to it the storage for the array itself so
+// there is only one pointer to manage, but this means that you can't copy the
+// array without keeping the original around.
+BASE_EXPORT std::unique_ptr<char*[]> AlterEnvironment(
+ const char* const* env,
+ const EnvironmentMap& changes);
+#elif defined(OS_WIN)
+// Returns a modified environment vector constructed from the given environment
+// and the list of changes given in |changes|. Each key in the environment is
+// matched against the first element of the pairs. In the event of a match, the
+// value is replaced by the second of the pair, unless the second is empty, in
+// which case the key-value is removed.
+//
+// This Windows version takes and returns a Windows-style environment block,
+// which is a string containing several null-terminated strings followed by an
+// extra terminating null character. So, e.g., the environment A=1 B=2 is
+// represented as L"A=1\0B=2\0\0".
+BASE_EXPORT NativeEnvironmentString
+AlterEnvironment(const wchar_t* env, const EnvironmentMap& changes);
+#endif // OS_*
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_PROCESS_ENVIRONMENT_INTERNAL_H_
diff --git a/security/sandbox/chromium/base/process/kill.h b/security/sandbox/chromium/base/process/kill.h
new file mode 100644
index 0000000000..70a04d97e5
--- /dev/null
+++ b/security/sandbox/chromium/base/process/kill.h
@@ -0,0 +1,162 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains routines to kill processes and get the exit code and
+// termination status.
+
+#ifndef BASE_PROCESS_KILL_H_
+#define BASE_PROCESS_KILL_H_
+
+#include "base/files/file_path.h"
+#include "base/process/process.h"
+#include "base/process/process_handle.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class ProcessFilter;
+
+#if defined(OS_WIN)
+namespace win {
+
+// See definition in sandbox/win/src/sandbox_types.h
+const DWORD kSandboxFatalMemoryExceeded = 7012;
+
+// Exit codes with special meanings on Windows.
+const DWORD kNormalTerminationExitCode = 0;
+const DWORD kDebuggerInactiveExitCode = 0xC0000354;
+const DWORD kKeyboardInterruptExitCode = 0xC000013A;
+const DWORD kDebuggerTerminatedExitCode = 0x40010004;
+const DWORD kStatusInvalidImageHashExitCode = 0xC0000428;
+
+// This exit code is used by the Windows task manager when it kills a
+// process. Its value is obviously not that unique, and it's
+// surprising to me that the task manager uses this value, but it
+// seems to be common practice on Windows to test for it as an
+// indication that the task manager has killed something if the
+// process goes away.
+const DWORD kProcessKilledExitCode = 1;
+
+} // namespace win
+
+#endif // OS_WIN
+
+// Return status values from GetTerminationStatus. Don't use these as
+// exit code arguments to KillProcess*(), use platform/application
+// specific values instead.
+enum TerminationStatus {
+ // clang-format off
+ TERMINATION_STATUS_NORMAL_TERMINATION, // zero exit status
+ TERMINATION_STATUS_ABNORMAL_TERMINATION, // non-zero exit status
+ TERMINATION_STATUS_PROCESS_WAS_KILLED, // e.g. SIGKILL or task manager kill
+ TERMINATION_STATUS_PROCESS_CRASHED, // e.g. Segmentation fault
+ TERMINATION_STATUS_STILL_RUNNING, // child hasn't exited yet
+#if defined(OS_CHROMEOS)
+ // Used for the case when oom-killer kills a process on ChromeOS.
+ TERMINATION_STATUS_PROCESS_WAS_KILLED_BY_OOM,
+#endif
+#if defined(OS_ANDROID)
+ // On Android processes are spawned from the system Zygote and we do not get
+ // the termination status. We can't know if the termination was a crash or an
+ // oom kill for sure, but we can use status of the strong process bindings as
+ // a hint.
+ TERMINATION_STATUS_OOM_PROTECTED, // child was protected from oom kill
+#endif
+ TERMINATION_STATUS_LAUNCH_FAILED, // child process never launched
+ TERMINATION_STATUS_OOM, // Process died due to oom
+#if defined(OS_WIN)
+ // On Windows, the OS terminated the process due to a code integrity failure.
+ TERMINATION_STATUS_INTEGRITY_FAILURE,
+#endif
+ TERMINATION_STATUS_MAX_ENUM // Not a real status; marks the enum's end.
+ // clang-format on
+};
+
+// Attempts to kill all the processes on the current machine that were launched
+// from the given executable name, ending them with the given exit code. If
+// filter is non-null, then only processes selected by the filter are killed.
+// Returns true if all processes were able to be killed off, false if at least
+// one couldn't be killed.
+BASE_EXPORT bool KillProcesses(const FilePath::StringType& executable_name,
+ int exit_code,
+ const ProcessFilter* filter);
+
+#if defined(OS_POSIX)
+// Attempts to kill the process group identified by |process_group_id|. Returns
+// true on success.
+BASE_EXPORT bool KillProcessGroup(ProcessHandle process_group_id);
+#endif // defined(OS_POSIX)
+
+// Get the termination status of the process by interpreting the
+// circumstances of the child process' death. |exit_code| is set to
+// the status returned by waitpid() on POSIX, and from GetExitCodeProcess() on
+// Windows, and may not be null. Note that on Linux, this function
+// will only return a useful result the first time it is called after
+// the child exits (because it will reap the child and the information
+// will no longer be available).
+BASE_EXPORT TerminationStatus GetTerminationStatus(ProcessHandle handle,
+ int* exit_code);
+
+#if defined(OS_POSIX)
+// Send a kill signal to the process and then wait for the process to exit
+// and get the termination status.
+//
+// This is used in situations where it is believed that the process is dead
+// or dying (because communication with the child process has been cut).
+// In order to avoid erroneously returning that the process is still running
+// because the kernel is still cleaning it up, this will wait for the process
+// to terminate. In order to avoid the risk of hanging while waiting for the
+// process to terminate, send a SIGKILL to the process before waiting for the
+// termination status.
+//
+// Note that it is not an option to call WaitForExitCode and then
+// GetTerminationStatus as the child will be reaped when WaitForExitCode
+// returns, and this information will be lost.
+//
+BASE_EXPORT TerminationStatus GetKnownDeadTerminationStatus(
+ ProcessHandle handle, int* exit_code);
+
+#if defined(OS_LINUX)
+// Spawns a thread to wait asynchronously for the child |process| to exit
+// and then reaps it.
+BASE_EXPORT void EnsureProcessGetsReaped(Process process);
+#endif // defined(OS_LINUX)
+#endif // defined(OS_POSIX)
+
+// Registers |process| to be asynchronously monitored for termination, forcibly
+// terminated if necessary, and reaped on exit. The caller should have signalled
+// |process| to exit before calling this API. The API will allow a couple of
+// seconds grace period before forcibly terminating |process|.
+// TODO(https://crbug.com/806451): The Mac implementation currently blocks the
+// calling thread for up to two seconds.
+BASE_EXPORT void EnsureProcessTerminated(Process process);
+
+// These are only sparingly used, and not needed on Fuchsia. They could be
+// implemented if necessary.
+#if !defined(OS_FUCHSIA)
+// Wait for all the processes based on the named executable to exit. If filter
+// is non-null, then only processes selected by the filter are waited on.
+// Returns after all processes have exited or wait_milliseconds have expired.
+// Returns true if all the processes exited, false otherwise.
+BASE_EXPORT bool WaitForProcessesToExit(
+ const FilePath::StringType& executable_name,
+ base::TimeDelta wait,
+ const ProcessFilter* filter);
+
+// Waits a certain amount of time (can be 0) for all the processes with a given
+// executable name to exit, then kills off any of them that are still around.
+// If filter is non-null, then only processes selected by the filter are waited
+// on. Killed processes are ended with the given exit code. Returns false if
+// any processes needed to be killed, true if they all exited cleanly within
+// the wait_milliseconds delay.
+BASE_EXPORT bool CleanupProcesses(const FilePath::StringType& executable_name,
+ base::TimeDelta wait,
+ int exit_code,
+ const ProcessFilter* filter);
+#endif // !defined(OS_FUCHSIA)
+
+} // namespace base
+
+#endif // BASE_PROCESS_KILL_H_
diff --git a/security/sandbox/chromium/base/process/memory.h b/security/sandbox/chromium/base/process/memory.h
new file mode 100644
index 0000000000..ddbb9d957c
--- /dev/null
+++ b/security/sandbox/chromium/base/process/memory.h
@@ -0,0 +1,89 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_MEMORY_H_
+#define BASE_PROCESS_MEMORY_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/process/process_handle.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Enables 'terminate on heap corruption' flag. Helps protect against heap
+// overflow. Has no effect if the OS doesn't provide the necessary facility.
+BASE_EXPORT void EnableTerminationOnHeapCorruption();
+
+// Turns on process termination if memory runs out.
+BASE_EXPORT void EnableTerminationOnOutOfMemory();
+
+// Terminates process. Should be called only for out of memory errors.
+// Crash reporting classifies such crashes as OOM.
+BASE_EXPORT void TerminateBecauseOutOfMemory(size_t size);
+
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_AIX)
+BASE_EXPORT extern size_t g_oom_size;
+
+// The maximum allowed value for the OOM score.
+const int kMaxOomScore = 1000;
+
+// This adjusts /proc/<pid>/oom_score_adj so the Linux OOM killer will
+// prefer to kill certain process types over others. The range for the
+// adjustment is [-1000, 1000], with [0, 1000] being user accessible.
+// If the Linux system doesn't support the newer oom_score_adj range
+// of [0, 1000], then we revert to using the older oom_adj, and
+// translate the given value into [0, 15]. Some aliasing of values
+// may occur in that case, of course.
+BASE_EXPORT bool AdjustOOMScore(ProcessId process, int score);
+#endif
+
+namespace internal {
+// Returns true if address-space was released. Some configurations reserve part
+// of the process address-space for special allocations (e.g. WASM).
+bool ReleaseAddressSpaceReservation();
+} // namespace internal
+
+#if defined(OS_WIN)
+namespace win {
+
+// Custom Windows exception code chosen to indicate an out of memory error.
+// See https://msdn.microsoft.com/en-us/library/het71c37.aspx.
+// "To make sure that you do not define a code that conflicts with an existing
+// exception code" ... "The resulting error code should therefore have the
+// highest four bits set to hexadecimal E."
+// 0xe0000008 was chosen arbitrarily, as 0x00000008 is ERROR_NOT_ENOUGH_MEMORY.
+const DWORD kOomExceptionCode = 0xe0000008;
+
+} // namespace win
+#endif
+
+namespace internal {
+
+// Handles out of memory, with the failed allocation |size|, or 0 when it is not
+// known.
+BASE_EXPORT void OnNoMemoryInternal(size_t size);
+
+} // namespace internal
+
+// Special allocator functions for callers that want to check for OOM.
+// These will not abort if the allocation fails even if
+// EnableTerminationOnOutOfMemory has been called.
+// This can be useful for huge and/or unpredictable size memory allocations.
+// Please only use this if you really handle the case when the allocation
+// fails. Doing otherwise would risk security.
+// These functions may still crash on OOM when running under memory tools,
+// specifically ASan and other sanitizers.
+// Return value tells whether the allocation succeeded. If it fails |result| is
+// set to NULL, otherwise it holds the memory address.
+BASE_EXPORT WARN_UNUSED_RESULT bool UncheckedMalloc(size_t size,
+ void** result);
+BASE_EXPORT WARN_UNUSED_RESULT bool UncheckedCalloc(size_t num_items,
+ size_t size,
+ void** result);
+
+} // namespace base
+
+#endif // BASE_PROCESS_MEMORY_H_
diff --git a/security/sandbox/chromium/base/process/process.h b/security/sandbox/chromium/base/process/process.h
new file mode 100644
index 0000000000..d6f8d83e36
--- /dev/null
+++ b/security/sandbox/chromium/base/process/process.h
@@ -0,0 +1,223 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_PROCESS_H_
+#define BASE_PROCESS_PROCESS_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <lib/zx/process.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include "base/feature_list.h"
+#include "base/process/port_provider_mac.h"
+#endif
+
+namespace base {
+
+#if defined(OS_MACOSX)
+extern const Feature kMacAllowBackgroundingProcesses;
+#endif
+
+// Provides a move-only encapsulation of a process.
+//
+// This object is not tied to the lifetime of the underlying process: the
+// process may be killed and this object may still be around, and it will still
+// claim to be valid. The actual behavior in that case is OS dependent like so:
+//
+// Windows: The underlying ProcessHandle will be valid after the process dies
+// and can be used to gather some information about that process, but most
+// methods will obviously fail.
+//
+// POSIX: The underlying ProcessHandle is not guaranteed to remain valid after
+// the process dies, and it may be reused by the system, which means that it may
+// end up pointing to the wrong process.
+class BASE_EXPORT Process {
+ public:
+ // On Windows, this takes ownership of |handle|. On POSIX, this does not take
+ // ownership of |handle|.
+ explicit Process(ProcessHandle handle = kNullProcessHandle);
+
+ Process(Process&& other);
+
+ // The destructor does not terminate the process.
+ ~Process();
+
+ Process& operator=(Process&& other);
+
+ // Returns an object for the current process.
+ static Process Current();
+
+ // Returns a Process for the given |pid|.
+ static Process Open(ProcessId pid);
+
+ // Returns a Process for the given |pid|. On Windows the handle is opened
+ // with more access rights and must only be used by trusted code (can read the
+ // address space and duplicate handles).
+ static Process OpenWithExtraPrivileges(ProcessId pid);
+
+#if defined(OS_WIN)
+ // Returns a Process for the given |pid|, using some |desired_access|.
+ // See ::OpenProcess documentation for valid |desired_access|.
+ static Process OpenWithAccess(ProcessId pid, DWORD desired_access);
+#endif
+
+ // Creates an object from a |handle| owned by someone else.
+ // Don't use this for new code. It is only intended to ease the migration to
+ // a strict ownership model.
+ // TODO(rvargas) crbug.com/417532: Remove this code.
+ static Process DeprecatedGetProcessFromHandle(ProcessHandle handle);
+
+ // Returns true if processes can be backgrounded.
+ static bool CanBackgroundProcesses();
+
+ // Terminates the current process immediately with |exit_code|.
+ [[noreturn]] static void TerminateCurrentProcessImmediately(int exit_code);
+
+ // Returns true if this object represents a valid process.
+ bool IsValid() const;
+
+ // Returns a handle for this process. There is no guarantee about when that
+ // handle becomes invalid because this object retains ownership.
+ ProcessHandle Handle() const;
+
+ // Returns a second object that represents this process.
+ Process Duplicate() const;
+
+ // Get the PID for this process.
+ ProcessId Pid() const;
+
+#if !defined(OS_ANDROID)
+ // Get the creation time for this process. Since the Pid can be reused after a
+ // process dies, it is useful to use both the Pid and the creation time to
+ // uniquely identify a process.
+ //
+ // Not available on Android because /proc/stat/ cannot be accessed on O+.
+ // https://issuetracker.google.com/issues/37140047
+ Time CreationTime() const;
+#endif // !defined(OS_ANDROID)
+
+ // Returns true if this process is the current process.
+ bool is_current() const;
+
+ // Close the process handle. This will not terminate the process.
+ void Close();
+
+ // Returns true if this process is still running. This is only safe on Windows
+ // (and maybe Fuchsia?), because the ProcessHandle will keep the zombie
+ // process information available until it has itself been released. But on
+ // Posix, the OS may reuse the ProcessId.
+#if defined(OS_WIN)
+ bool IsRunning() const {
+ return !WaitForExitWithTimeout(base::TimeDelta(), nullptr);
+ }
+#endif
+
+ // Terminates the process with extreme prejudice. The given |exit_code| will
+ // be the exit code of the process. If |wait| is true, this method will wait
+ // for up to one minute for the process to actually terminate.
+ // Returns true if the process terminates within the allowed time.
+ // NOTE: On POSIX |exit_code| is ignored.
+ bool Terminate(int exit_code, bool wait) const;
+
+ // Waits for the process to exit. Returns true on success.
+ // On POSIX, if the process has been signaled then |exit_code| is set to -1.
+ // On Linux this must be a child process, however on Mac and Windows it can be
+ // any process.
+ // NOTE: |exit_code| is optional, nullptr can be passed if the exit code is
+ // not required.
+ bool WaitForExit(int* exit_code) const;
+
+ // Same as WaitForExit() but only waits for up to |timeout|.
+ // NOTE: |exit_code| is optional, nullptr can be passed if the exit code
+ // is not required.
+ bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const;
+
+ // Indicates that the process has exited with the specified |exit_code|.
+ // This should be called if process exit is observed outside of this class.
+ // (i.e. Not because Terminate or WaitForExit, above, was called.)
+ // Note that nothing prevents this being called multiple times for a dead
+ // process though that should be avoided.
+ void Exited(int exit_code) const;
+
+#if defined(OS_MACOSX)
+ // The Mac needs a Mach port in order to manipulate a process's priority,
+ // and there's no good way to get that from base given the pid. These Mac
+ // variants of the IsProcessBackgrounded and SetProcessBackgrounded API take
+ // a port provider for this reason. See crbug.com/460102
+ //
+ // A process is backgrounded when its task priority is
+ // |TASK_BACKGROUND_APPLICATION|.
+ //
+ // Returns true if the port_provider can locate a task port for the process
+ // and it is backgrounded. If port_provider is null, returns false.
+ bool IsProcessBackgrounded(PortProvider* port_provider) const;
+
+ // Set the process as backgrounded. If value is
+ // true, the priority of the associated task will be set to
+ // TASK_BACKGROUND_APPLICATION. If value is false, the
+ // priority of the process will be set to TASK_FOREGROUND_APPLICATION.
+ //
+ // Returns true if the priority was changed, false otherwise. If
+ // |port_provider| is null, this is a no-op and it returns false.
+ bool SetProcessBackgrounded(PortProvider* port_provider, bool value);
+#else
+ // A process is backgrounded when its priority is lower than normal.
+ // Return true if this process is backgrounded, false otherwise.
+ bool IsProcessBackgrounded() const;
+
+ // Set a process as backgrounded. If value is true, the priority of the
+ // process will be lowered. If value is false, the priority of the process
+ // will be made "normal" - equivalent to default process priority.
+ // Returns true if the priority was changed, false otherwise.
+ bool SetProcessBackgrounded(bool value);
+#endif // defined(OS_MACOSX)
+ // Returns an integer representing the priority of a process. The meaning
+ // of this value is OS dependent.
+ int GetPriority() const;
+
+#if defined(OS_CHROMEOS)
+ // Get the PID in its PID namespace.
+ // If the process is not in a PID namespace or /proc/<pid>/status does not
+ // report NSpid, kNullProcessId is returned.
+ ProcessId GetPidInNamespace() const;
+#endif
+
+ private:
+#if defined(OS_WIN)
+ win::ScopedHandle process_;
+#elif defined(OS_FUCHSIA)
+ zx::process process_;
+#else
+ ProcessHandle process_;
+#endif
+
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+ bool is_current_process_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(Process);
+};
+
+#if defined(OS_CHROMEOS)
+// Exposed for testing.
+// Given the contents of the /proc/<pid>/cgroup file, determine whether the
+// process is backgrounded or not.
+BASE_EXPORT bool IsProcessBackgroundedCGroup(
+ const StringPiece& cgroup_contents);
+#endif // defined(OS_CHROMEOS)
+
+} // namespace base
+
+#endif // BASE_PROCESS_PROCESS_H_
diff --git a/security/sandbox/chromium/base/process/process_handle.h b/security/sandbox/chromium/base/process/process_handle.h
new file mode 100644
index 0000000000..94f7006119
--- /dev/null
+++ b/security/sandbox/chromium/base/process/process_handle.h
@@ -0,0 +1,142 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_PROCESS_HANDLE_H_
+#define BASE_PROCESS_PROCESS_HANDLE_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <zircon/types.h>
+#endif
+
+namespace base {
+
+// ProcessHandle is a platform specific type which represents the underlying OS
+// handle to a process.
+// ProcessId is a number which identifies the process in the OS.
+#if defined(OS_WIN)
+typedef HANDLE ProcessHandle;
+typedef DWORD ProcessId;
+typedef HANDLE UserTokenHandle;
+const ProcessHandle kNullProcessHandle = NULL;
+const ProcessId kNullProcessId = 0;
+#elif defined(OS_FUCHSIA)
+typedef zx_handle_t ProcessHandle;
+typedef zx_koid_t ProcessId;
+const ProcessHandle kNullProcessHandle = ZX_HANDLE_INVALID;
+const ProcessId kNullProcessId = ZX_KOID_INVALID;
+#elif defined(OS_POSIX)
+// On POSIX, our ProcessHandle will just be the PID.
+typedef pid_t ProcessHandle;
+typedef pid_t ProcessId;
+const ProcessHandle kNullProcessHandle = 0;
+const ProcessId kNullProcessId = 0;
+#endif // defined(OS_WIN)
+
+// To print ProcessIds portably use CrPRIdPid (based on PRIuS and friends from
+// C99 and format_macros.h) like this:
+// base::StringPrintf("PID is %" CrPRIdPid ".\n", pid);
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+#define CrPRIdPid "ld"
+#else
+#define CrPRIdPid "d"
+#endif
+
+class UniqueProcId { // Copyable, totally-ordered wrapper around a ProcessId.
+ public:
+ explicit UniqueProcId(ProcessId value) : value_(value) {}
+ UniqueProcId(const UniqueProcId& other) = default;
+ UniqueProcId& operator=(const UniqueProcId& other) = default;
+
+ // Returns the process PID. WARNING: On some platforms, the pid may not be
+ // valid within the current process sandbox.
+ ProcessId GetUnsafeValue() const { return value_; }
+
+ bool operator==(const UniqueProcId& other) const {
+ return value_ == other.value_;
+ }
+
+ bool operator!=(const UniqueProcId& other) const {
+ return value_ != other.value_;
+ }
+
+ bool operator<(const UniqueProcId& other) const {
+ return value_ < other.value_;
+ }
+
+ bool operator<=(const UniqueProcId& other) const {
+ return value_ <= other.value_;
+ }
+
+ bool operator>(const UniqueProcId& other) const {
+ return value_ > other.value_;
+ }
+
+ bool operator>=(const UniqueProcId& other) const {
+ return value_ >= other.value_;
+ }
+
+ private:
+ ProcessId value_; // The wrapped PID value.
+};
+
+std::ostream& operator<<(std::ostream& os, const UniqueProcId& obj);
+
+// Returns the id of the current process.
+// Note that on some platforms, this is not guaranteed to be unique across
+// processes (use GetUniqueIdForProcess if uniqueness is required).
+BASE_EXPORT ProcessId GetCurrentProcId();
+
+// Returns a unique ID for the current process. The ID will be unique across all
+// currently running processes within the chrome session, but IDs of terminated
+// processes may be reused.
+BASE_EXPORT UniqueProcId GetUniqueIdForProcess();
+
+#if defined(OS_LINUX)
+// When a process is started in a different PID namespace from the browser
+// process, this function must be called with the process's PID in the browser's
+// PID namespace in order to initialize its unique ID. Not thread safe.
+// WARNING: To avoid inconsistent results from GetUniqueIdForProcess, this
+// should only be called very early after process startup - ideally as soon
+// after process creation as possible.
+BASE_EXPORT void InitUniqueIdForProcessInPidNamespace(
+ ProcessId pid_outside_of_namespace);
+#endif
+
+// Returns the ProcessHandle of the current process.
+BASE_EXPORT ProcessHandle GetCurrentProcessHandle();
+
+// Returns the process ID for the specified process. This is functionally the
+// same as Windows' GetProcessId(), but works on versions of Windows before Win
+// XP SP1 as well.
+// DEPRECATED. New code should be using Process::Pid() instead.
+// Note that on some platforms, this is not guaranteed to be unique across
+// processes.
+BASE_EXPORT ProcessId GetProcId(ProcessHandle process);
+
+#if !defined(OS_FUCHSIA)
+// Returns the ID for the parent of the given process. Not available on Fuchsia.
+// Returning a negative value indicates an error, such as if the |process| does
+// not exist. Returns 0 when |process| has no parent process.
+BASE_EXPORT ProcessId GetParentProcessId(ProcessHandle process);
+#endif // !defined(OS_FUCHSIA)
+
+#if defined(OS_POSIX)
+// Returns the path to the executable of the given process.
+BASE_EXPORT FilePath GetProcessExecutablePath(ProcessHandle process);
+#endif
+
+} // namespace base
+
+#endif // BASE_PROCESS_PROCESS_HANDLE_H_
diff --git a/security/sandbox/chromium/base/process/process_handle_win.cc b/security/sandbox/chromium/base/process/process_handle_win.cc
new file mode 100644
index 0000000000..ccc759039d
--- /dev/null
+++ b/security/sandbox/chromium/base/process/process_handle_win.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_handle.h"
+
+#include <windows.h>
+#include <tlhelp32.h>
+
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+
+ProcessId GetCurrentProcId() {
+ return ::GetCurrentProcessId();
+}
+
+ProcessHandle GetCurrentProcessHandle() {
+ return ::GetCurrentProcess();
+}
+
+ProcessId GetProcId(ProcessHandle process) {
+ if (process == base::kNullProcessHandle)
+ return 0;
+ // This returns 0 if we have insufficient rights to query the process handle.
+ // Invalid handles or non-process handles will cause a hard failure.
+ ProcessId result = GetProcessId(process);
+ CHECK(result != 0 || GetLastError() != ERROR_INVALID_HANDLE)
+ << "process handle = " << process;
+ return result;
+}
+
+ProcessId GetParentProcessId(ProcessHandle process) {
+ ProcessId child_pid = GetProcId(process);
+ PROCESSENTRY32 process_entry;
+ process_entry.dwSize = sizeof(PROCESSENTRY32);
+
+ win::ScopedHandle snapshot(CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0));
+ if (snapshot.IsValid() && Process32First(snapshot.Get(), &process_entry)) {
+ do {
+ if (process_entry.th32ProcessID == child_pid)
+ return process_entry.th32ParentProcessID;
+ } while (Process32Next(snapshot.Get(), &process_entry));
+ }
+
+ // TODO(zijiehe): To match other platforms, -1 (UINT32_MAX) should be returned
+// if |child_pid| cannot be found in the |snapshot|.
+ return 0u;
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/rand_util.h b/security/sandbox/chromium/base/rand_util.h
new file mode 100644
index 0000000000..45e4283223
--- /dev/null
+++ b/security/sandbox/chromium/base/rand_util.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_RAND_UTIL_H_
+#define BASE_RAND_UTIL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <string>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Returns a random number in range [0, UINT64_MAX]. Thread-safe.
+BASE_EXPORT uint64_t RandUint64();
+
+// Returns a random number between min and max (inclusive). Thread-safe.
+BASE_EXPORT int RandInt(int min, int max);
+
+// Returns a random number in range [0, range). Thread-safe.
+BASE_EXPORT uint64_t RandGenerator(uint64_t range);
+
+// Returns a random double in range [0, 1). Thread-safe.
+BASE_EXPORT double RandDouble();
+
+// Given input |bits|, convert with maximum precision to a double in
+// the range [0, 1). Thread-safe.
+BASE_EXPORT double BitsToOpenEndedUnitInterval(uint64_t bits);
+
+// Fills |output_length| bytes of |output| with random data. Thread-safe.
+//
+// Although implementations are required to use a cryptographically secure
+// random number source, code outside of base/ that relies on this should use
+// crypto::RandBytes instead to ensure the requirement is easily discoverable.
+BASE_EXPORT void RandBytes(void* output, size_t output_length);
+
+// Fills a string of length |length| with random data and returns it.
+// |length| should be nonzero. Thread-safe.
+//
+// Note that this is a variation of |RandBytes| with a different return type.
+// The returned string is likely not ASCII/UTF-8. Use with care.
+//
+// Although implementations are required to use a cryptographically secure
+// random number source, code outside of base/ that relies on this should use
+// crypto::RandBytes instead to ensure the requirement is easily discoverable.
+BASE_EXPORT std::string RandBytesAsString(size_t length);
+
+// An STL UniformRandomBitGenerator backed by RandUint64.
+// TODO(tzik): Consider replacing this with a faster implementation.
+class RandomBitGenerator {
+ public:
+ using result_type = uint64_t;
+ static constexpr result_type min() { return 0; }
+ static constexpr result_type max() { return UINT64_MAX; }
+ result_type operator()() const { return RandUint64(); }
+
+ RandomBitGenerator() = default;
+ ~RandomBitGenerator() = default;
+};
+
+// Shuffles [first, last) randomly. Thread-safe.
+template <typename Itr>
+void RandomShuffle(Itr first, Itr last) {
+ std::shuffle(first, last, RandomBitGenerator());
+}
+
+#if defined(OS_POSIX)
+BASE_EXPORT int GetUrandomFD();
+#endif
+
+} // namespace base
+
+#endif // BASE_RAND_UTIL_H_
diff --git a/security/sandbox/chromium/base/rand_util_win.cc b/security/sandbox/chromium/base/rand_util_win.cc
new file mode 100644
index 0000000000..193a3f63a3
--- /dev/null
+++ b/security/sandbox/chromium/base/rand_util_win.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <windows.h>
+#include <stddef.h>
+#include <stdint.h>
+
+// #define needed to link in RtlGenRandom(), a.k.a. SystemFunction036. See the
+// "Community Additions" comment on MSDN here:
+// http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx
+#define SystemFunction036 NTAPI SystemFunction036
+#include <ntsecapi.h>
+#undef SystemFunction036
+
+#include <algorithm>
+#include <limits>
+
+#include "base/logging.h"
+
+namespace base {
+
+void RandBytes(void* output, size_t output_length) {
+ char* output_ptr = static_cast<char*>(output);
+ while (output_length > 0) {
+ const ULONG output_bytes_this_pass = static_cast<ULONG>(std::min(
+ output_length, static_cast<size_t>(std::numeric_limits<ULONG>::max())));
+ const bool success =
+ RtlGenRandom(output_ptr, output_bytes_this_pass) != FALSE;
+ CHECK(success);
+ output_length -= output_bytes_this_pass;
+ output_ptr += output_bytes_this_pass;
+ }
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/scoped_clear_last_error.h b/security/sandbox/chromium/base/scoped_clear_last_error.h
new file mode 100644
index 0000000000..b19f0436ae
--- /dev/null
+++ b/security/sandbox/chromium/base/scoped_clear_last_error.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_CLEAR_LAST_ERROR_H_
+#define BASE_SCOPED_CLEAR_LAST_ERROR_H_
+
+#include <errno.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+// ScopedClearLastError stores and resets the value of thread local error codes
+// (errno, GetLastError()), and restores them in the destructor. This is useful
+// to avoid side effects on these values in instrumentation functions that
+// interact with the OS.
+
+// Common implementation of ScopedClearLastError for all platforms. Use
+// ScopedClearLastError instead.
+class BASE_EXPORT ScopedClearLastErrorBase {
+ public:
+ ScopedClearLastErrorBase() : last_errno_(errno) { errno = 0; }
+ ~ScopedClearLastErrorBase() { errno = last_errno_; }
+
+ private:
+ const int last_errno_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedClearLastErrorBase);
+};
+
+#if defined(OS_WIN)
+
+// Windows specific implementation of ScopedClearLastError.
+class BASE_EXPORT ScopedClearLastError : public ScopedClearLastErrorBase {
+ public:
+ ScopedClearLastError();
+ ~ScopedClearLastError();
+
+ private:
+ unsigned int last_system_error_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedClearLastError);
+};
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+using ScopedClearLastError = ScopedClearLastErrorBase;
+
+#endif // defined(OS_WIN)
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_SCOPED_CLEAR_LAST_ERROR_H_
diff --git a/security/sandbox/chromium/base/scoped_clear_last_error_win.cc b/security/sandbox/chromium/base/scoped_clear_last_error_win.cc
new file mode 100644
index 0000000000..cdf996359e
--- /dev/null
+++ b/security/sandbox/chromium/base/scoped_clear_last_error_win.cc
@@ -0,0 +1,22 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/scoped_clear_last_error.h"
+
+#include <windows.h>
+
+namespace base {
+namespace internal {
+
+ScopedClearLastError::ScopedClearLastError()
+ : last_system_error_(::GetLastError()) {
+ ::SetLastError(0);
+}
+
+ScopedClearLastError::~ScopedClearLastError() {
+ ::SetLastError(last_system_error_);
+}
+
+} // namespace internal
+} // namespace base
diff --git a/security/sandbox/chromium/base/sequence_checker.h b/security/sandbox/chromium/base/sequence_checker.h
new file mode 100644
index 0000000000..60ffd75a4f
--- /dev/null
+++ b/security/sandbox/chromium/base/sequence_checker.h
@@ -0,0 +1,143 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCE_CHECKER_H_
+#define BASE_SEQUENCE_CHECKER_H_
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/sequence_checker_impl.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+// SequenceChecker is a helper class used to help verify that some methods of a
+// class are called sequentially (for thread-safety). It supports thread safety
+// annotations (see base/thread_annotations.h).
+//
+// Use the macros below instead of the SequenceChecker directly so that the
+// unused member doesn't result in an extra byte (four when padded) per
+// instance in production.
+//
+// This class is much preferred to ThreadChecker for thread-safety checks.
+// ThreadChecker should only be used for classes that are truly thread-affine
+// (use thread-local-storage or a third-party API that does).
+//
+// Usage:
+// class MyClass {
+// public:
+// MyClass() {
+// // It's sometimes useful to detach on construction for objects that are
+// // constructed in one place and forever after used from another
+// // sequence.
+// DETACH_FROM_SEQUENCE(my_sequence_checker_);
+// }
+//
+// ~MyClass() {
+// // SequenceChecker doesn't automatically check it's destroyed on origin
+// // sequence for the same reason it's sometimes detached in the
+// // constructor. It's okay to destroy off sequence if the owner
+// // otherwise knows usage on the associated sequence is done. If you're
+// // not detaching in the constructor, you probably want to explicitly
+// // check in the destructor.
+// DCHECK_CALLED_ON_VALID_SEQUENCE(my_sequence_checker_);
+// }
+// void MyMethod() {
+// DCHECK_CALLED_ON_VALID_SEQUENCE(my_sequence_checker_);
+// ... (do stuff) ...
+// MyOtherMethod();
+// }
+//
+// void MyOtherMethod()
+// VALID_CONTEXT_REQUIRED(my_sequence_checker_) {
+// foo_ = 42;
+// }
+//
+// private:
+// // GUARDED_BY_CONTEXT() enforces that this member is only
+// // accessed from a scope that invokes DCHECK_CALLED_ON_VALID_SEQUENCE()
+// // or from a function annotated with VALID_CONTEXT_REQUIRED(). A
+// // DCHECK build will not compile if the member is accessed and these
+// // conditions are not met.
+// int foo_ GUARDED_BY_CONTEXT(my_sequence_checker_);
+//
+// SEQUENCE_CHECKER(my_sequence_checker_);
+// }
+
+#define SEQUENCE_CHECKER_INTERNAL_CONCAT2(a, b) a##b
+#define SEQUENCE_CHECKER_INTERNAL_CONCAT(a, b) \
+ SEQUENCE_CHECKER_INTERNAL_CONCAT2(a, b)
+#define SEQUENCE_CHECKER_INTERNAL_UID(prefix) \
+ SEQUENCE_CHECKER_INTERNAL_CONCAT(prefix, __LINE__)
+
+#if DCHECK_IS_ON()
+#define SEQUENCE_CHECKER(name) base::SequenceChecker name
+#define DCHECK_CALLED_ON_VALID_SEQUENCE(name, ...) \
+ base::ScopedValidateSequenceChecker SEQUENCE_CHECKER_INTERNAL_UID( \
+ scoped_validate_sequence_checker_)(name, ##__VA_ARGS__);
+#define DETACH_FROM_SEQUENCE(name) (name).DetachFromSequence()
+#else // DCHECK_IS_ON()
+#if __OBJC__ && defined(OS_IOS) && !HAS_FEATURE(objc_cxx_static_assert)
+// TODO(thakis): Remove this branch once Xcode's clang has clang r356148.
+#define SEQUENCE_CHECKER(name)
+#else
+#define SEQUENCE_CHECKER(name) static_assert(true, "")
+#endif
+#define DCHECK_CALLED_ON_VALID_SEQUENCE(name, ...) EAT_STREAM_PARAMETERS
+#define DETACH_FROM_SEQUENCE(name)
+#endif // DCHECK_IS_ON()
+
+namespace base {
+
+// Do nothing implementation, for use in release mode.
+//
+// Note: You should almost always use the SequenceChecker class (through the
+// above macros) to get the right version for your build configuration.
+// Note: This is only a check, not a "lock". It is marked "LOCKABLE" only in
+// order to support thread_annotations.h.
+class LOCKABLE SequenceCheckerDoNothing {
+ public:
+ SequenceCheckerDoNothing() = default;
+
+ // Moving between matching sequences is allowed to help classes with
+ // SequenceCheckers that want a default move-construct/assign.
+ SequenceCheckerDoNothing(SequenceCheckerDoNothing&& other) = default;
+ SequenceCheckerDoNothing& operator=(SequenceCheckerDoNothing&& other) =
+ default;
+
+ bool CalledOnValidSequence() const WARN_UNUSED_RESULT { return true; }
+ void DetachFromSequence() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SequenceCheckerDoNothing);
+};
+
+#if DCHECK_IS_ON()
+class SequenceChecker : public SequenceCheckerImpl {
+};
+#else
+class SequenceChecker : public SequenceCheckerDoNothing {
+};
+#endif // DCHECK_IS_ON()
+
+class SCOPED_LOCKABLE ScopedValidateSequenceChecker {
+ public:
+ explicit ScopedValidateSequenceChecker(const SequenceChecker& checker)
+ EXCLUSIVE_LOCK_FUNCTION(checker) {
+ DCHECK(checker.CalledOnValidSequence());
+ }
+
+ explicit ScopedValidateSequenceChecker(const SequenceChecker& checker,
+ const StringPiece& msg)
+ EXCLUSIVE_LOCK_FUNCTION(checker) {
+ DCHECK(checker.CalledOnValidSequence()) << msg;
+ }
+
+ ~ScopedValidateSequenceChecker() UNLOCK_FUNCTION() {}
+
+ private:
+};
+
+} // namespace base
+
+#endif // BASE_SEQUENCE_CHECKER_H_
diff --git a/security/sandbox/chromium/base/sequence_checker_impl.h b/security/sandbox/chromium/base/sequence_checker_impl.h
new file mode 100644
index 0000000000..ea0fbb5bfa
--- /dev/null
+++ b/security/sandbox/chromium/base/sequence_checker_impl.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCE_CHECKER_IMPL_H_
+#define BASE_SEQUENCE_CHECKER_IMPL_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
+
+namespace base {
+
+// Real implementation of SequenceChecker for use in debug mode or for temporary
+// use in release mode (e.g. to CHECK on a threading issue seen only in the
+// wild).
+//
+// Note: You should almost always use the SequenceChecker class to get the right
+// version for your build configuration.
+// Note: This is only a check, not a "lock". It is marked "LOCKABLE" only in
+// order to support thread_annotations.h.
+class LOCKABLE BASE_EXPORT SequenceCheckerImpl {
+ public:
+ SequenceCheckerImpl();
+ ~SequenceCheckerImpl();
+
+ // Allow move construct/assign. This must be called on |other|'s associated
+ // sequence and assignment can only be made into a SequenceCheckerImpl which
+ // is detached or already associated with the current sequence. This isn't
+ // thread-safe (|this| and |other| shouldn't be in use while this move is
+ // performed). If the assignment was legal, the resulting SequenceCheckerImpl
+ // will be bound to the current sequence and |other| will be detached.
+ SequenceCheckerImpl(SequenceCheckerImpl&& other);
+ SequenceCheckerImpl& operator=(SequenceCheckerImpl&& other);
+
+ // Returns true if called in sequence with previous calls to this method and
+ // the constructor.
+ bool CalledOnValidSequence() const WARN_UNUSED_RESULT;
+
+ // Unbinds the checker from the currently associated sequence. The checker
+ // will be re-bound on the next call to CalledOnValidSequence().
+ void DetachFromSequence();
+
+ private:
+ class Core;
+
+ // Calls straight to ThreadLocalStorage::HasBeenDestroyed(). Exposed purely
+ // for 'friend' to work.
+ static bool HasThreadLocalStorageBeenDestroyed();
+
+ mutable Lock lock_;
+ mutable std::unique_ptr<Core> core_ GUARDED_BY(lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(SequenceCheckerImpl);
+};
+
+} // namespace base
+
+#endif // BASE_SEQUENCE_CHECKER_IMPL_H_
diff --git a/security/sandbox/chromium/base/sequence_token.h b/security/sandbox/chromium/base/sequence_token.h
new file mode 100644
index 0000000000..6e7d191ae8
--- /dev/null
+++ b/security/sandbox/chromium/base/sequence_token.h
@@ -0,0 +1,115 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCE_TOKEN_H_
+#define BASE_SEQUENCE_TOKEN_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+
+// A token that identifies a series of sequenced tasks (i.e. tasks that run one
+// at a time in posting order).
+class BASE_EXPORT SequenceToken {
+ public:
+ // Instantiates an invalid SequenceToken.
+ SequenceToken() = default;
+
+ // Explicitly allow copy.
+ SequenceToken(const SequenceToken& other) = default;
+ SequenceToken& operator=(const SequenceToken& other) = default;
+
+ // An invalid SequenceToken is not equal to any other SequenceToken, including
+ // other invalid SequenceTokens.
+ bool operator==(const SequenceToken& other) const;
+ bool operator!=(const SequenceToken& other) const;
+
+ // Returns true if this is a valid SequenceToken.
+ bool IsValid() const;
+
+ // Returns the integer uniquely representing this SequenceToken. This method
+ // should only be used for tracing and debugging.
+ int ToInternalValue() const;
+
+ // Returns a valid SequenceToken which isn't equal to any previously returned
+ // SequenceToken.
+ static SequenceToken Create();
+
+ // Returns the SequenceToken associated with the task running on the current
+ // thread, as determined by the active ScopedSetSequenceTokenForCurrentThread
+ // if any.
+ static SequenceToken GetForCurrentThread();
+
+ private:
+ explicit SequenceToken(int token) : token_(token) {}
+
+ static constexpr int kInvalidSequenceToken = -1;
+ int token_ = kInvalidSequenceToken;
+};
+
+// A token that identifies a task.
+//
+// This is used by ThreadCheckerImpl to determine whether calls to
+// CalledOnValidThread() come from the same task and hence are deterministically
+// single-threaded (vs. calls coming from different sequenced or parallel tasks,
+// which may or may not run on the same thread).
+class BASE_EXPORT TaskToken {
+ public:
+ // Instantiates an invalid TaskToken.
+ TaskToken() = default;
+
+ // Explicitly allow copy.
+ TaskToken(const TaskToken& other) = default;
+ TaskToken& operator=(const TaskToken& other) = default;
+
+ // An invalid TaskToken is not equal to any other TaskToken, including
+ // other invalid TaskTokens.
+ bool operator==(const TaskToken& other) const;
+ bool operator!=(const TaskToken& other) const;
+
+ // Returns true if this is a valid TaskToken.
+ bool IsValid() const;
+
+ // In the scope of a ScopedSetSequenceTokenForCurrentThread, returns a valid
+ // TaskToken which isn't equal to any TaskToken returned in the scope of a
+ // different ScopedSetSequenceTokenForCurrentThread. Otherwise, returns an
+ // invalid TaskToken.
+ static TaskToken GetForCurrentThread();
+
+ private:
+ friend class ScopedSetSequenceTokenForCurrentThread;
+
+ explicit TaskToken(int token) : token_(token) {}
+
+ // Returns a valid TaskToken which isn't equal to any previously returned
+ // TaskToken. This is private as it only meant to be instantiated by
+ // ScopedSetSequenceTokenForCurrentThread.
+ static TaskToken Create();
+
+ static constexpr int kInvalidTaskToken = -1;
+ int token_ = kInvalidTaskToken;
+};
+
+// Instantiate this in the scope where a single task runs.
+class BASE_EXPORT ScopedSetSequenceTokenForCurrentThread {
+ public:
+ // Throughout the lifetime of the constructed object,
+ // SequenceToken::GetForCurrentThread() will return |sequence_token| and
+ // TaskToken::GetForCurrentThread() will return a TaskToken which is not equal
+ // to any TaskToken returned in the scope of another
+ // ScopedSetSequenceTokenForCurrentThread.
+ ScopedSetSequenceTokenForCurrentThread(const SequenceToken& sequence_token);
+ ~ScopedSetSequenceTokenForCurrentThread();
+
+ private:
+ const SequenceToken sequence_token_;
+ const TaskToken task_token_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSetSequenceTokenForCurrentThread);
+};
+
+} // namespace base
+
+#endif // BASE_SEQUENCE_TOKEN_H_
diff --git a/security/sandbox/chromium/base/sequenced_task_runner.h b/security/sandbox/chromium/base/sequenced_task_runner.h
new file mode 100644
index 0000000000..976f87ff2a
--- /dev/null
+++ b/security/sandbox/chromium/base/sequenced_task_runner.h
@@ -0,0 +1,201 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCED_TASK_RUNNER_H_
+#define BASE_SEQUENCED_TASK_RUNNER_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/sequenced_task_runner_helpers.h"
+#include "base/task_runner.h"
+
+namespace base {
+
+// A SequencedTaskRunner is a subclass of TaskRunner that provides
+// additional guarantees on the order that tasks are started, as well
+// as guarantees on when tasks are in sequence, i.e. one task finishes
+// before the other one starts.
+//
+// Summary
+// -------
+// Non-nested tasks with the same delay will run one by one in FIFO
+// order.
+//
+// Detailed guarantees
+// -------------------
+//
+// SequencedTaskRunner also adds additional methods for posting
+// non-nestable tasks. In general, an implementation of TaskRunner
+// may expose task-running methods which are themselves callable from
+// within tasks. A non-nestable task is one that is guaranteed to not
+// be run from within an already-running task. Conversely, a nestable
+// task (the default) is a task that can be run from within an
+// already-running task.
+//
+// The guarantees of SequencedTaskRunner are as follows:
+//
+// - Given two tasks T2 and T1, T2 will start after T1 starts if:
+//
+// * T2 is posted after T1; and
+// * T2 has equal or higher delay than T1; and
+// * T2 is non-nestable or T1 is nestable.
+//
+// - If T2 will start after T1 starts by the above guarantee, then
+// T2 will start after T1 finishes and is destroyed if:
+//
+// * T2 is non-nestable, or
+// * T1 doesn't call any task-running methods.
+//
+// - If T2 will start after T1 finishes by the above guarantee, then
+// all memory changes in T1 and T1's destruction will be visible
+// to T2.
+//
+// - If T2 runs nested within T1 via a call to the task-running
+// method M, then all memory changes in T1 up to the call to M
+// will be visible to T2, and all memory changes in T2 will be
+// visible to T1 from the return from M.
+//
+// Note that SequencedTaskRunner does not guarantee that tasks are run
+// on a single dedicated thread, although the above guarantees provide
+// most (but not all) of the same guarantees. If you do need to
+// guarantee that tasks are run on a single dedicated thread, see
+// SingleThreadTaskRunner (in single_thread_task_runner.h).
+//
+// Some corollaries to the above guarantees, assuming the tasks in
+// question don't call any task-running methods:
+//
+// - Tasks posted via PostTask are run in FIFO order.
+//
+// - Tasks posted via PostNonNestableTask are run in FIFO order.
+//
+// - Tasks posted with the same delay and the same nestable state
+// are run in FIFO order.
+//
+// - A list of tasks with the same nestable state posted in order of
+// non-decreasing delay is run in FIFO order.
+//
+// - A list of tasks posted in order of non-decreasing delay with at
+// most a single change in nestable state from nestable to
+// non-nestable is run in FIFO order. (This is equivalent to the
+// statement of the first guarantee above.)
+//
+// Some theoretical implementations of SequencedTaskRunner:
+//
+// - A SequencedTaskRunner that wraps a regular TaskRunner but makes
+// sure that only one task at a time is posted to the TaskRunner,
+// with appropriate memory barriers in between tasks.
+//
+// - A SequencedTaskRunner that, for each task, spawns a joinable
+// thread to run that task and immediately quit, and then
+// immediately joins that thread.
+//
+// - A SequencedTaskRunner that stores the list of posted tasks and
+// has a method Run() that runs each runnable task in FIFO order
+// that can be called from any thread, but only if another
+// (non-nested) Run() call isn't already happening.
+class BASE_EXPORT SequencedTaskRunner : public TaskRunner {
+ public:
+ // The two PostNonNestable*Task methods below are like their
+ // nestable equivalents in TaskRunner, but they guarantee that the
+ // posted task will not run nested within an already-running task.
+ //
+ // A simple corollary is that posting a task as non-nestable can
+ // only delay when the task gets run. That is, posting a task as
+ // non-nestable may not affect when the task gets run, or it could
+ // make it run later than it normally would, but it won't make it
+ // run earlier than it normally would.
+
+ // TODO(akalin): Get rid of the boolean return value for the methods
+ // below.
+
+ bool PostNonNestableTask(const Location& from_here, OnceClosure task);
+
+ virtual bool PostNonNestableDelayedTask(const Location& from_here,
+ OnceClosure task,
+ base::TimeDelta delay) = 0;
+
+ // Submits a non-nestable task to delete the given object. Returns
+ // true if the object may be deleted at some point in the future,
+ // and false if the object definitely will not be deleted.
+ template <class T>
+ bool DeleteSoon(const Location& from_here, const T* object) {
+ return DeleteOrReleaseSoonInternal(from_here, &DeleteHelper<T>::DoDelete,
+ object);
+ }
+
+ template <class T>
+ bool DeleteSoon(const Location& from_here, std::unique_ptr<T> object) {
+ return DeleteSoon(from_here, object.release());
+ }
+
+ // Submits a non-nestable task to release the given object.
+ //
+// ReleaseSoon makes sure that the object that the scoped_refptr points to gets
+ // properly released on the correct thread.
+ // We apply ReleaseSoon to the rvalue as the side-effects can be unclear to
+ // the caller if an lvalue is used. That being so, the scoped_refptr should
+ // always be std::move'd.
+ // Example use:
+ //
+ // scoped_refptr<T> foo_scoped_refptr;
+ // ...
+ // task_runner->ReleaseSoon(std::move(foo_scoped_refptr));
+ template <class T>
+ void ReleaseSoon(const Location& from_here, scoped_refptr<T>&& object) {
+ if (!object)
+ return;
+
+ DeleteOrReleaseSoonInternal(from_here, &ReleaseHelper<T>::DoRelease,
+ object.release());
+ }
+
+ // Returns true iff tasks posted to this TaskRunner are sequenced
+ // with this call.
+ //
+ // In particular:
+ // - Returns true if this is a SequencedTaskRunner to which the
+ // current task was posted.
+ // - Returns true if this is a SequencedTaskRunner bound to the
+ // same sequence as the SequencedTaskRunner to which the current
+ // task was posted.
+ // - Returns true if this is a SingleThreadTaskRunner bound to
+ // the current thread.
+ virtual bool RunsTasksInCurrentSequence() const = 0;
+
+ protected:
+ ~SequencedTaskRunner() override = default;
+
+ private:
+ bool DeleteOrReleaseSoonInternal(const Location& from_here,
+ void (*deleter)(const void*),
+ const void* object);
+};
+
+// Sample usage with std::unique_ptr :
+// std::unique_ptr<Foo, base::OnTaskRunnerDeleter> ptr(
+// new Foo, base::OnTaskRunnerDeleter(my_task_runner));
+//
+// For RefCounted see base::RefCountedDeleteOnSequence.
+struct BASE_EXPORT OnTaskRunnerDeleter {
+ explicit OnTaskRunnerDeleter(scoped_refptr<SequencedTaskRunner> task_runner);
+ ~OnTaskRunnerDeleter();
+
+ OnTaskRunnerDeleter(OnTaskRunnerDeleter&&);
+ OnTaskRunnerDeleter& operator=(OnTaskRunnerDeleter&&);
+
+ // For compatibility with std:: deleters.
+ template <typename T>
+ void operator()(const T* ptr) {
+ if (ptr)
+ task_runner_->DeleteSoon(FROM_HERE, ptr);
+ }
+
+ scoped_refptr<SequencedTaskRunner> task_runner_;
+};
+
+} // namespace base
+
+#endif // BASE_SEQUENCED_TASK_RUNNER_H_
diff --git a/security/sandbox/chromium/base/sequenced_task_runner_helpers.h b/security/sandbox/chromium/base/sequenced_task_runner_helpers.h
new file mode 100644
index 0000000000..18ec0e26f5
--- /dev/null
+++ b/security/sandbox/chromium/base/sequenced_task_runner_helpers.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
+#define BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
+
+namespace base {
+
+class SequencedTaskRunner;
+
+// Template helpers which use function indirection to erase T from the
+// function signature while still remembering it so we can call the
+// correct destructor/release function.
+//
+// We use this trick so we don't need to include bind.h in a header
+// file like sequenced_task_runner.h. We also wrap the helpers in a
+// templated class to make it easier for users of DeleteSoon to
+// declare the helper as a friend.
+template <class T>
+class DeleteHelper {
+ private:
+ static void DoDelete(const void* object) {
+ delete static_cast<const T*>(object);
+ }
+
+ friend class SequencedTaskRunner;
+};
+
+template <class T>
+class ReleaseHelper {
+ private:
+ static void DoRelease(const void* object) {
+ static_cast<const T*>(object)->Release();
+ }
+
+ friend class SequencedTaskRunner;
+};
+
+} // namespace base
+
+#endif // BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
diff --git a/security/sandbox/chromium/base/single_thread_task_runner.h b/security/sandbox/chromium/base/single_thread_task_runner.h
new file mode 100644
index 0000000000..4d6938ed6c
--- /dev/null
+++ b/security/sandbox/chromium/base/single_thread_task_runner.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SINGLE_THREAD_TASK_RUNNER_H_
+#define BASE_SINGLE_THREAD_TASK_RUNNER_H_
+
+#include "base/base_export.h"
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+
+// A SingleThreadTaskRunner is a SequencedTaskRunner with one more
+// guarantee; namely, that all tasks are run on a single dedicated
+// thread. Most use cases require only a SequencedTaskRunner, unless
+// there is a specific need to run tasks on only a single thread.
+//
+// SingleThreadTaskRunner implementations might:
+// - Post tasks to an existing thread's MessageLoop (see
+// MessageLoop::task_runner()).
+// - Create their own worker thread and MessageLoop to post tasks to.
+// - Add tasks to a FIFO and signal to a non-MessageLoop thread for them to
+// be processed. This allows TaskRunner-oriented code to run on threads
+// running other kinds of message loop, e.g. Jingle threads.
+class BASE_EXPORT SingleThreadTaskRunner : public SequencedTaskRunner {
+ public:
+ // A more explicit alias to RunsTasksInCurrentSequence().
+ bool BelongsToCurrentThread() const { return RunsTasksInCurrentSequence(); }
+
+ protected:
+ ~SingleThreadTaskRunner() override = default;
+};
+
+} // namespace base
+
+#endif // BASE_SINGLE_THREAD_TASK_RUNNER_H_
diff --git a/security/sandbox/chromium/base/stl_util.h b/security/sandbox/chromium/base/stl_util.h
new file mode 100644
index 0000000000..83d86ad90d
--- /dev/null
+++ b/security/sandbox/chromium/base/stl_util.h
@@ -0,0 +1,681 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Derived from google3/util/gtl/stl_util.h
+
+#ifndef BASE_STL_UTIL_H_
+#define BASE_STL_UTIL_H_
+
+#include <algorithm>
+#include <deque>
+#include <forward_list>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/optional.h"
+#include "base/template_util.h"
+
+namespace base {
+
+namespace internal {
+
+// Calls erase on iterators of matching elements.
+template <typename Container, typename Predicate>
+void IterateAndEraseIf(Container& container, Predicate pred) {
+ for (auto it = container.begin(); it != container.end();) {
+ if (pred(*it))
+ it = container.erase(it);
+ else
+ ++it;
+ }
+}
+
+template <typename Iter>
+constexpr bool IsRandomAccessIter =
+ std::is_same<typename std::iterator_traits<Iter>::iterator_category,
+ std::random_access_iterator_tag>::value;
+
+// Utility type traits used for specializing base::Contains() below.
+template <typename Container, typename Element, typename = void>
+struct HasFindWithNpos : std::false_type {};
+
+template <typename Container, typename Element>
+struct HasFindWithNpos<
+ Container,
+ Element,
+ void_t<decltype(std::declval<const Container&>().find(
+ std::declval<const Element&>()) != Container::npos)>>
+ : std::true_type {};
+
+template <typename Container, typename Element, typename = void>
+struct HasFindWithEnd : std::false_type {};
+
+template <typename Container, typename Element>
+struct HasFindWithEnd<Container,
+ Element,
+ void_t<decltype(std::declval<const Container&>().find(
+ std::declval<const Element&>()) !=
+ std::declval<const Container&>().end())>>
+ : std::true_type {};
+
+template <typename Container, typename Element, typename = void>
+struct HasContains : std::false_type {};
+
+template <typename Container, typename Element>
+struct HasContains<Container,
+ Element,
+ void_t<decltype(std::declval<const Container&>().contains(
+ std::declval<const Element&>()))>> : std::true_type {};
+
+} // namespace internal
+
+// C++14 implementation of C++17's std::size():
+// http://en.cppreference.com/w/cpp/iterator/size
+template <typename Container>
+constexpr auto size(const Container& c) -> decltype(c.size()) {
+ return c.size();
+}
+
+template <typename T, size_t N>
+constexpr size_t size(const T (&array)[N]) noexcept {
+ return N;
+}
+
+// C++14 implementation of C++17's std::empty():
+// http://en.cppreference.com/w/cpp/iterator/empty
+template <typename Container>
+constexpr auto empty(const Container& c) -> decltype(c.empty()) {
+ return c.empty();
+}
+
+template <typename T, size_t N>
+constexpr bool empty(const T (&array)[N]) noexcept {
+ return false;
+}
+
+template <typename T>
+constexpr bool empty(std::initializer_list<T> il) noexcept {
+ return il.size() == 0;
+}
+
+// C++14 implementation of C++17's std::data():
+// http://en.cppreference.com/w/cpp/iterator/data
+template <typename Container>
+constexpr auto data(Container& c) -> decltype(c.data()) {
+ return c.data();
+}
+
+// std::basic_string::data() had no mutable overload prior to C++17 [1].
+// Hence this overload is provided.
+// Note: str[0] is safe even for empty strings, as they are guaranteed to be
+// null-terminated [2].
+//
+// [1] http://en.cppreference.com/w/cpp/string/basic_string/data
+// [2] http://en.cppreference.com/w/cpp/string/basic_string/operator_at
+template <typename CharT, typename Traits, typename Allocator>
+CharT* data(std::basic_string<CharT, Traits, Allocator>& str) {
+ return std::addressof(str[0]);
+}
+
+template <typename Container>
+constexpr auto data(const Container& c) -> decltype(c.data()) {
+ return c.data();
+}
+
+template <typename T, size_t N>
+constexpr T* data(T (&array)[N]) noexcept {
+ return array;
+}
+
+template <typename T>
+constexpr const T* data(std::initializer_list<T> il) noexcept {
+ return il.begin();
+}
+
+// std::array::data() was not constexpr prior to C++17 [1].
+// Hence these overloads are provided.
+//
+// [1] https://en.cppreference.com/w/cpp/container/array/data
+template <typename T, size_t N>
+constexpr T* data(std::array<T, N>& array) noexcept {
+ return !array.empty() ? &array[0] : nullptr;
+}
+
+template <typename T, size_t N>
+constexpr const T* data(const std::array<T, N>& array) noexcept {
+ return !array.empty() ? &array[0] : nullptr;
+}
+
+// C++14 implementation of C++17's std::as_const():
+// https://en.cppreference.com/w/cpp/utility/as_const
+template <typename T>
+constexpr std::add_const_t<T>& as_const(T& t) noexcept {
+ return t;
+}
+
+template <typename T>
+void as_const(const T&& t) = delete;
+
+// Returns a const reference to the underlying container of a container adapter.
+// Works for std::priority_queue, std::queue, and std::stack.
+template <class A>
+const typename A::container_type& GetUnderlyingContainer(const A& adapter) {
+ struct ExposedAdapter : A {
+ using A::c;
+ };
+ return adapter.*&ExposedAdapter::c;
+}
+
+// Clears internal memory of an STL object.
+// STL clear()/reserve(0) does not always free internal memory allocated
+// This function uses swap/destructor to ensure the internal memory is freed.
+template<class T>
+void STLClearObject(T* obj) {
+ T tmp;
+ tmp.swap(*obj);
+ // Sometimes "T tmp" allocates objects with memory (arena implementation?).
+ // Hence using additional reserve(0) even if it doesn't always work.
+ obj->reserve(0);
+}
+
+// Counts the number of instances of val in a container.
+template <typename Container, typename T>
+typename std::iterator_traits<
+ typename Container::const_iterator>::difference_type
+STLCount(const Container& container, const T& val) {
+ return std::count(container.begin(), container.end(), val);
+}
+
+// General purpose implementation to check if |container| contains |value|.
+template <typename Container,
+ typename Value,
+ std::enable_if_t<
+ !internal::HasFindWithNpos<Container, Value>::value &&
+ !internal::HasFindWithEnd<Container, Value>::value &&
+ !internal::HasContains<Container, Value>::value>* = nullptr>
+bool Contains(const Container& container, const Value& value) {
+ using std::begin;
+ using std::end;
+ return std::find(begin(container), end(container), value) != end(container);
+}
+
+// Specialized Contains() implementation for when |container| has a find()
+// member function and a static npos member, but no contains() member function.
+template <typename Container,
+ typename Value,
+ std::enable_if_t<internal::HasFindWithNpos<Container, Value>::value &&
+ !internal::HasContains<Container, Value>::value>* =
+ nullptr>
+bool Contains(const Container& container, const Value& value) {
+ return container.find(value) != Container::npos;
+}
+
+// Specialized Contains() implementation for when |container| has a find()
+// and end() member function, but no contains() member function.
+template <typename Container,
+ typename Value,
+ std::enable_if_t<internal::HasFindWithEnd<Container, Value>::value &&
+ !internal::HasContains<Container, Value>::value>* =
+ nullptr>
+bool Contains(const Container& container, const Value& value) {
+ return container.find(value) != container.end();
+}
+
+// Specialized Contains() implementation for when |container| has a contains()
+// member function.
+template <
+ typename Container,
+ typename Value,
+ std::enable_if_t<internal::HasContains<Container, Value>::value>* = nullptr>
+bool Contains(const Container& container, const Value& value) {
+ return container.contains(value);
+}
+
+// O(1) implementation of const casting an iterator for any sequence,
+// associative or unordered associative container in the STL.
+//
+// Reference: https://stackoverflow.com/a/10669041
+template <typename Container,
+ typename ConstIter,
+ std::enable_if_t<!internal::IsRandomAccessIter<ConstIter>>* = nullptr>
+constexpr auto ConstCastIterator(Container& c, ConstIter it) {
+ return c.erase(it, it);
+}
+
+// Explicit overload for std::forward_list where erase() is named erase_after().
+template <typename T, typename Allocator>
+constexpr auto ConstCastIterator(
+ std::forward_list<T, Allocator>& c,
+ typename std::forward_list<T, Allocator>::const_iterator it) {
+// The erase_after(it, it) trick used below does not work for libstdc++ [1],
+// thus we need a different way.
+// TODO(crbug.com/972541): Remove this workaround once libstdc++ is fixed on all
+// platforms.
+//
+// [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90857
+#if defined(__GLIBCXX__)
+ return c.insert_after(it, {});
+#else
+ return c.erase_after(it, it);
+#endif
+}
+
+// Specialized O(1) const casting for random access iterators. This is
+// necessary, because erase() is either not available (e.g. array-like
+// containers), or has O(n) complexity (e.g. std::deque or std::vector).
+template <typename Container,
+ typename ConstIter,
+ std::enable_if_t<internal::IsRandomAccessIter<ConstIter>>* = nullptr>
+constexpr auto ConstCastIterator(Container& c, ConstIter it) {
+ using std::begin;
+ using std::cbegin;
+ return begin(c) + (it - cbegin(c));
+}
+
+namespace internal {
+
+template <typename Map, typename Key, typename Value>
+std::pair<typename Map::iterator, bool> InsertOrAssignImpl(Map& map,
+ Key&& key,
+ Value&& value) {
+ auto lower = map.lower_bound(key);
+ if (lower != map.end() && !map.key_comp()(key, lower->first)) {
+ // key already exists, perform assignment.
+ lower->second = std::forward<Value>(value);
+ return {lower, false};
+ }
+
+ // key did not yet exist, insert it.
+ return {map.emplace_hint(lower, std::forward<Key>(key),
+ std::forward<Value>(value)),
+ true};
+}
+
+template <typename Map, typename Key, typename Value>
+typename Map::iterator InsertOrAssignImpl(Map& map,
+ typename Map::const_iterator hint,
+ Key&& key,
+ Value&& value) {
+ auto&& key_comp = map.key_comp();
+ if ((hint == map.begin() || key_comp(std::prev(hint)->first, key))) {
+ if (hint == map.end() || key_comp(key, hint->first)) {
+ // *(hint - 1) < key < *hint => key did not exist and hint is correct.
+ return map.emplace_hint(hint, std::forward<Key>(key),
+ std::forward<Value>(value));
+ }
+
+ if (!key_comp(hint->first, key)) {
+ // key == *hint => key already exists and hint is correct.
+ auto mutable_hint = ConstCastIterator(map, hint);
+ mutable_hint->second = std::forward<Value>(value);
+ return mutable_hint;
+ }
+ }
+
+ // hint was not helpful, dispatch to hintless version.
+ return InsertOrAssignImpl(map, std::forward<Key>(key),
+ std::forward<Value>(value))
+ .first;
+}
+
+template <typename Map, typename Key, typename... Args>
+std::pair<typename Map::iterator, bool> TryEmplaceImpl(Map& map,
+ Key&& key,
+ Args&&... args) {
+ auto lower = map.lower_bound(key);
+ if (lower != map.end() && !map.key_comp()(key, lower->first)) {
+ // key already exists, do nothing.
+ return {lower, false};
+ }
+
+ // key did not yet exist, insert it.
+ return {map.emplace_hint(lower, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<Key>(key)),
+ std::forward_as_tuple(std::forward<Args>(args)...)),
+ true};
+}
+
+template <typename Map, typename Key, typename... Args>
+typename Map::iterator TryEmplaceImpl(Map& map,
+ typename Map::const_iterator hint,
+ Key&& key,
+ Args&&... args) {
+ auto&& key_comp = map.key_comp();
+ if ((hint == map.begin() || key_comp(std::prev(hint)->first, key))) {
+ if (hint == map.end() || key_comp(key, hint->first)) {
+ // *(hint - 1) < key < *hint => key did not exist and hint is correct.
+ return map.emplace_hint(
+ hint, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<Key>(key)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ }
+
+ if (!key_comp(hint->first, key)) {
+ // key == *hint => no-op, return correct hint.
+ return ConstCastIterator(map, hint);
+ }
+ }
+
+ // hint was not helpful, dispatch to hintless version.
+ return TryEmplaceImpl(map, std::forward<Key>(key),
+ std::forward<Args>(args)...)
+ .first;
+}
+
+} // namespace internal
+
+// Implementation of C++17's std::map::insert_or_assign as a free function.
+template <typename Map, typename Value>
+std::pair<typename Map::iterator, bool>
+InsertOrAssign(Map& map, const typename Map::key_type& key, Value&& value) {
+ return internal::InsertOrAssignImpl(map, key, std::forward<Value>(value));
+}
+
+template <typename Map, typename Value>
+std::pair<typename Map::iterator, bool>
+InsertOrAssign(Map& map, typename Map::key_type&& key, Value&& value) {
+ return internal::InsertOrAssignImpl(map, std::move(key),
+ std::forward<Value>(value));
+}
+
+// Implementation of C++17's std::map::insert_or_assign with hint as a free
+// function.
+template <typename Map, typename Value>
+typename Map::iterator InsertOrAssign(Map& map,
+ typename Map::const_iterator hint,
+ const typename Map::key_type& key,
+ Value&& value) {
+ return internal::InsertOrAssignImpl(map, hint, key,
+ std::forward<Value>(value));
+}
+
+template <typename Map, typename Value>
+typename Map::iterator InsertOrAssign(Map& map,
+ typename Map::const_iterator hint,
+ typename Map::key_type&& key,
+ Value&& value) {
+ return internal::InsertOrAssignImpl(map, hint, std::move(key),
+ std::forward<Value>(value));
+}
+
+// Implementation of C++17's std::map::try_emplace as a free function.
+template <typename Map, typename... Args>
+std::pair<typename Map::iterator, bool>
+TryEmplace(Map& map, const typename Map::key_type& key, Args&&... args) {
+ return internal::TryEmplaceImpl(map, key, std::forward<Args>(args)...);
+}
+
+template <typename Map, typename... Args>
+std::pair<typename Map::iterator, bool> TryEmplace(Map& map,
+ typename Map::key_type&& key,
+ Args&&... args) {
+ return internal::TryEmplaceImpl(map, std::move(key),
+ std::forward<Args>(args)...);
+}
+
+// Implementation of C++17's std::map::try_emplace with hint as a free
+// function.
+template <typename Map, typename... Args>
+typename Map::iterator TryEmplace(Map& map,
+ typename Map::const_iterator hint,
+ const typename Map::key_type& key,
+ Args&&... args) {
+ return internal::TryEmplaceImpl(map, hint, key, std::forward<Args>(args)...);
+}
+
+template <typename Map, typename... Args>
+typename Map::iterator TryEmplace(Map& map,
+ typename Map::const_iterator hint,
+ typename Map::key_type&& key,
+ Args&&... args) {
+ return internal::TryEmplaceImpl(map, hint, std::move(key),
+ std::forward<Args>(args)...);
+}
+
+// Returns true if the container is sorted.
+template <typename Container>
+bool STLIsSorted(const Container& cont) {
+ return std::is_sorted(std::begin(cont), std::end(cont));
+}
+
+// Returns a new ResultType containing the difference of two sorted containers.
+template <typename ResultType, typename Arg1, typename Arg2>
+ResultType STLSetDifference(const Arg1& a1, const Arg2& a2) {
+ DCHECK(STLIsSorted(a1));
+ DCHECK(STLIsSorted(a2));
+ ResultType difference;
+ std::set_difference(a1.begin(), a1.end(),
+ a2.begin(), a2.end(),
+ std::inserter(difference, difference.end()));
+ return difference;
+}
+
+// Returns a new ResultType containing the union of two sorted containers.
+template <typename ResultType, typename Arg1, typename Arg2>
+ResultType STLSetUnion(const Arg1& a1, const Arg2& a2) {
+ DCHECK(STLIsSorted(a1));
+ DCHECK(STLIsSorted(a2));
+ ResultType result;
+ std::set_union(a1.begin(), a1.end(),
+ a2.begin(), a2.end(),
+ std::inserter(result, result.end()));
+ return result;
+}
+
+// Returns a new ResultType containing the intersection of two sorted
+// containers.
+template <typename ResultType, typename Arg1, typename Arg2>
+ResultType STLSetIntersection(const Arg1& a1, const Arg2& a2) {
+ DCHECK(STLIsSorted(a1));
+ DCHECK(STLIsSorted(a2));
+ ResultType result;
+ std::set_intersection(a1.begin(), a1.end(),
+ a2.begin(), a2.end(),
+ std::inserter(result, result.end()));
+ return result;
+}
+
+// Returns true if the sorted container |a1| contains all elements of the sorted
+// container |a2|.
+template <typename Arg1, typename Arg2>
+bool STLIncludes(const Arg1& a1, const Arg2& a2) {
+ DCHECK(STLIsSorted(a1));
+ DCHECK(STLIsSorted(a2));
+ return std::includes(a1.begin(), a1.end(),
+ a2.begin(), a2.end());
+}
+
+// Erase/EraseIf are based on library fundamentals ts v2 erase/erase_if
+// http://en.cppreference.com/w/cpp/experimental/lib_extensions_2
+// They provide a generic way to erase elements from a container.
+// The functions here implement these for the standard containers until those
+// functions are available in the C++ standard.
+// For Chromium containers overloads should be defined in their own headers
+// (like standard containers).
+// Note: there is no std::erase for standard associative containers so we don't
+// have it either.
+
+template <typename CharT, typename Traits, typename Allocator, typename Value>
+void Erase(std::basic_string<CharT, Traits, Allocator>& container,
+ const Value& value) {
+ container.erase(std::remove(container.begin(), container.end(), value),
+ container.end());
+}
+
+template <typename CharT, typename Traits, typename Allocator, class Predicate>
+void EraseIf(std::basic_string<CharT, Traits, Allocator>& container,
+ Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::deque<T, Allocator>& container, const Value& value) {
+ container.erase(std::remove(container.begin(), container.end(), value),
+ container.end());
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::deque<T, Allocator>& container, Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::vector<T, Allocator>& container, const Value& value) {
+ container.erase(std::remove(container.begin(), container.end(), value),
+ container.end());
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::vector<T, Allocator>& container, Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::forward_list<T, Allocator>& container, const Value& value) {
+ // Unlike std::forward_list::remove, this function template accepts
+ // heterogeneous types and does not force a conversion to the container's
+ // value type before invoking the == operator.
+ container.remove_if([&](const T& cur) { return cur == value; });
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::forward_list<T, Allocator>& container, Predicate pred) {
+ container.remove_if(pred);
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::list<T, Allocator>& container, const Value& value) {
+ // Unlike std::list::remove, this function template accepts heterogeneous
+ // types and does not force a conversion to the container's value type before
+ // invoking the == operator.
+ container.remove_if([&](const T& cur) { return cur == value; });
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::list<T, Allocator>& container, Predicate pred) {
+ container.remove_if(pred);
+}
+
+template <class Key, class T, class Compare, class Allocator, class Predicate>
+void EraseIf(std::map<Key, T, Compare, Allocator>& container, Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class T, class Compare, class Allocator, class Predicate>
+void EraseIf(std::multimap<Key, T, Compare, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class Compare, class Allocator, class Predicate>
+void EraseIf(std::set<Key, Compare, Allocator>& container, Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class Compare, class Allocator, class Predicate>
+void EraseIf(std::multiset<Key, Compare, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class T,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(std::unordered_map<Key, T, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class T,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(
+ std::unordered_multimap<Key, T, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(std::unordered_set<Key, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(std::unordered_multiset<Key, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+// A helper class to be used as the predicate with |EraseIf| to implement
+// in-place set intersection. Helps implement the algorithm of going through
+// each container an element at a time, erasing elements from the first
+// container if they aren't in the second container. Requires each container be
+// sorted. Note that the logic below appears inverted since it is returning
+// whether an element should be erased.
+template <class Collection>
+class IsNotIn {
+ public:
+ explicit IsNotIn(const Collection& collection)
+ : i_(collection.begin()), end_(collection.end()) {}
+
+ bool operator()(const typename Collection::value_type& x) {
+ while (i_ != end_ && *i_ < x)
+ ++i_;
+ if (i_ == end_)
+ return true;
+ if (*i_ == x) {
+ ++i_;
+ return false;
+ }
+ return true;
+ }
+
+ private:
+ typename Collection::const_iterator i_;
+ const typename Collection::const_iterator end_;
+};
+
+// Helper for returning the optional value's address, or nullptr.
+template <class T>
+T* OptionalOrNullptr(base::Optional<T>& optional) {
+ return optional.has_value() ? &optional.value() : nullptr;
+}
+
+template <class T>
+const T* OptionalOrNullptr(const base::Optional<T>& optional) {
+ return optional.has_value() ? &optional.value() : nullptr;
+}
+
+} // namespace base
+
+#endif // BASE_STL_UTIL_H_
diff --git a/security/sandbox/chromium/base/strings/char_traits.h b/security/sandbox/chromium/base/strings/char_traits.h
new file mode 100644
index 0000000000..b193e216cc
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/char_traits.h
@@ -0,0 +1,92 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_CHAR_TRAITS_H_
+#define BASE_STRINGS_CHAR_TRAITS_H_
+
+#include <stddef.h>
+
+#include "base/compiler_specific.h"
+
+namespace base {
+
+// constexpr version of http://en.cppreference.com/w/cpp/string/char_traits.
+// This currently just implements the bits needed to support a (mostly)
+// constexpr StringPiece.
+//
+// TODO(dcheng): Once we switch to C++17, most methods will become constexpr and
+// we can switch over to using the one in the standard library.
+template <typename T>
+struct CharTraits {
+  // Performs a lexicographical comparison of the first N characters of |s1| and
+ // |s2|. Returns 0 if equal, -1 if |s1| is less than |s2|, and 1 if |s1| is
+ // greater than |s2|.
+ static constexpr int compare(const T* s1, const T* s2, size_t n) noexcept;
+
+ // Returns the length of |s|, assuming null termination (and not including the
+ // terminating null).
+ static constexpr size_t length(const T* s) noexcept;
+};
+
+template <typename T>
+constexpr int CharTraits<T>::compare(const T* s1,
+ const T* s2,
+ size_t n) noexcept {
+ for (; n; --n, ++s1, ++s2) {
+ if (*s1 < *s2)
+ return -1;
+ if (*s1 > *s2)
+ return 1;
+ }
+ return 0;
+}
+
+template <typename T>
+constexpr size_t CharTraits<T>::length(const T* s) noexcept {
+ size_t i = 0;
+ for (; *s; ++s)
+ ++i;
+ return i;
+}
+
+// char specialization of CharTraits that can use clang's constexpr intrinsics,
+// where available.
+template <>
+struct CharTraits<char> {
+ static constexpr int compare(const char* s1,
+ const char* s2,
+ size_t n) noexcept;
+ static constexpr size_t length(const char* s) noexcept;
+};
+
+constexpr int CharTraits<char>::compare(const char* s1,
+ const char* s2,
+ size_t n) noexcept {
+#if HAS_FEATURE(cxx_constexpr_string_builtins)
+ return __builtin_memcmp(s1, s2, n);
+#else
+ for (; n; --n, ++s1, ++s2) {
+ if (*s1 < *s2)
+ return -1;
+ if (*s1 > *s2)
+ return 1;
+ }
+ return 0;
+#endif
+}
+
+constexpr size_t CharTraits<char>::length(const char* s) noexcept {
+#if defined(__clang__)
+ return __builtin_strlen(s);
+#else
+ size_t i = 0;
+ for (; *s; ++s)
+ ++i;
+ return i;
+#endif
+}
+
+} // namespace base
+
+#endif // BASE_STRINGS_CHAR_TRAITS_H_
diff --git a/security/sandbox/chromium/base/strings/nullable_string16.cc b/security/sandbox/chromium/base/strings/nullable_string16.cc
new file mode 100644
index 0000000000..076b282eb1
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/nullable_string16.cc
@@ -0,0 +1,33 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/nullable_string16.h"
+
+#include <ostream>
+#include <utility>
+
+namespace base {
+NullableString16::NullableString16() = default;
+NullableString16::NullableString16(const NullableString16& other) = default;
+NullableString16::NullableString16(NullableString16&& other) = default;
+
+NullableString16::NullableString16(const string16& string, bool is_null) {
+ if (!is_null)
+ string_.emplace(string);
+}
+
+NullableString16::NullableString16(Optional<string16> optional_string16)
+ : string_(std::move(optional_string16)) {}
+
+NullableString16::~NullableString16() = default;
+NullableString16& NullableString16::operator=(const NullableString16& other) =
+ default;
+NullableString16& NullableString16::operator=(NullableString16&& other) =
+ default;
+
+std::ostream& operator<<(std::ostream& out, const NullableString16& value) {
+ return value.is_null() ? out << "(null)" : out << value.string();
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/strings/nullable_string16.h b/security/sandbox/chromium/base/strings/nullable_string16.h
new file mode 100644
index 0000000000..abddee0f74
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/nullable_string16.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_NULLABLE_STRING16_H_
+#define BASE_STRINGS_NULLABLE_STRING16_H_
+
+#include <iosfwd>
+
+#include "base/base_export.h"
+#include "base/optional.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+// This class is a simple wrapper for string16 which also contains a null
+// state. This should be used only where the difference between null and
+// empty is meaningful.
+class BASE_EXPORT NullableString16 {
+ public:
+ NullableString16();
+ NullableString16(const NullableString16& other);
+ NullableString16(NullableString16&& other);
+ NullableString16(const string16& string, bool is_null);
+ explicit NullableString16(Optional<string16> optional_string16);
+ ~NullableString16();
+
+ NullableString16& operator=(const NullableString16& other);
+ NullableString16& operator=(NullableString16&& other);
+
+ const string16& string() const {
+ return string_ ? *string_ : EmptyString16();
+ }
+ bool is_null() const { return !string_; }
+ const Optional<string16>& as_optional_string16() const { return string_; }
+
+ private:
+ Optional<string16> string_;
+};
+
+inline bool operator==(const NullableString16& a, const NullableString16& b) {
+ return a.as_optional_string16() == b.as_optional_string16();
+}
+
+inline bool operator!=(const NullableString16& a, const NullableString16& b) {
+ return !(a == b);
+}
+
+BASE_EXPORT std::ostream& operator<<(std::ostream& out,
+ const NullableString16& value);
+
+} // namespace base
+
+#endif // BASE_STRINGS_NULLABLE_STRING16_H_
diff --git a/security/sandbox/chromium/base/strings/safe_sprintf.cc b/security/sandbox/chromium/base/strings/safe_sprintf.cc
new file mode 100644
index 0000000000..89049abd79
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/safe_sprintf.cc
@@ -0,0 +1,682 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/safe_sprintf.h"
+
+#include <errno.h>
+#include <string.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if !defined(NDEBUG)
+// In debug builds, we use RAW_CHECK() to print useful error messages, if
+// SafeSPrintf() is called with broken arguments.
+// As our contract promises that SafeSPrintf() can be called from any
+// restricted run-time context, it is not actually safe to call logging
+// functions from it; and we only ever do so for debug builds and hope for the
+// best. We should _never_ call any logging function other than RAW_CHECK(),
+// and we should _never_ include any logging code that is active in production
+// builds. Most notably, we should not include these logging functions in
+// unofficial release builds, even though those builds would otherwise have
+// DCHECKS() enabled.
+// In other words; please do not remove the #ifdef around this #include.
+// Instead, in production builds we opt for returning a degraded result,
+// whenever an error is encountered.
+// E.g. The broken function call
+// SafeSPrintf("errno = %d (%x)", errno, strerror(errno))
+// will print something like
+//   errno = 13 (%x)
+// instead of
+// errno = 13 (Access denied)
+// In most of the anticipated use cases, that's probably the preferred
+// behavior.
+#include "base/logging.h"
+#define DEBUG_CHECK RAW_CHECK
+#else
+#define DEBUG_CHECK(x) do { if (x) { } } while (0)
+#endif
+
+namespace base {
+namespace strings {
+
+// The code in this file is extremely careful to be async-signal-safe.
+//
+// Most obviously, we avoid calling any code that could dynamically allocate
+// memory. Doing so would almost certainly result in bugs and dead-locks.
+// We also avoid calling any other STL functions that could have unintended
+// side-effects involving memory allocation or access to other shared
+// resources.
+//
+// But on top of that, we also avoid calling other library functions, as many
+// of them have the side-effect of calling getenv() (in order to deal with
+// localization) or accessing errno. The latter sounds benign, but there are
+// several execution contexts where it isn't even possible to safely read let
+// alone write errno.
+//
+// The stated design goal of the SafeSPrintf() function is that it can be
+// called from any context that can safely call C or C++ code (i.e. anything
+// that doesn't require assembly code).
+//
+// For a brief overview of some but not all of the issues with async-signal-
+// safety, refer to:
+// http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html
+
+namespace {
+const size_t kSSizeMaxConst = ((size_t)(ssize_t)-1) >> 1;
+
+const char kUpCaseHexDigits[] = "0123456789ABCDEF";
+const char kDownCaseHexDigits[] = "0123456789abcdef";
+}
+
+#if defined(NDEBUG)
+// We would like to define kSSizeMax as std::numeric_limits<ssize_t>::max(),
+// but C++ doesn't allow us to do that for constants. Instead, we have to
+// use careful casting and shifting. We later use a static_assert to
+// verify that this worked correctly.
+namespace {
+const size_t kSSizeMax = kSSizeMaxConst;
+}
+#else // defined(NDEBUG)
+// For efficiency, we really need kSSizeMax to be a constant. But for unit
+// tests, it should be adjustable. This allows us to verify edge cases without
+// having to fill the entire available address space. As a compromise, we make
+// kSSizeMax adjustable in debug builds, and then only compile that particular
+// part of the unit test in debug builds.
+namespace {
+static size_t kSSizeMax = kSSizeMaxConst;
+}
+
+namespace internal {
+void SetSafeSPrintfSSizeMaxForTest(size_t max) {
+ kSSizeMax = max;
+}
+
+size_t GetSafeSPrintfSSizeMaxForTest() {
+ return kSSizeMax;
+}
+}
+#endif // defined(NDEBUG)
+
+namespace {
+class Buffer {
+ public:
+ // |buffer| is caller-allocated storage that SafeSPrintf() writes to. It
+ // has |size| bytes of writable storage. It is the caller's responsibility
+ // to ensure that the buffer is at least one byte in size, so that it fits
+ // the trailing NUL that will be added by the destructor. The buffer also
+ // must be smaller or equal to kSSizeMax in size.
+ Buffer(char* buffer, size_t size)
+ : buffer_(buffer),
+ size_(size - 1), // Account for trailing NUL byte
+ count_(0) {
+// MSVS2013's standard library doesn't mark max() as constexpr yet. cl.exe
+// supports static_cast but doesn't really implement constexpr yet so it doesn't
+// complain, but clang does.
+#if __cplusplus >= 201103 && !(defined(__clang__) && defined(OS_WIN))
+ static_assert(kSSizeMaxConst ==
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()),
+ "kSSizeMaxConst should be the max value of an ssize_t");
+#endif
+ DEBUG_CHECK(size > 0);
+ DEBUG_CHECK(size <= kSSizeMax);
+ }
+
+ ~Buffer() {
+ // The code calling the constructor guaranteed that there was enough space
+ // to store a trailing NUL -- and in debug builds, we are actually
+ // verifying this with DEBUG_CHECK()s in the constructor. So, we can
+ // always unconditionally write the NUL byte in the destructor. We do not
+ // need to adjust the count_, as SafeSPrintf() copies snprintf() in not
+ // including the NUL byte in its return code.
+ *GetInsertionPoint() = '\000';
+ }
+
+ // Returns true, iff the buffer is filled all the way to |kSSizeMax-1|. The
+ // caller can now stop adding more data, as GetCount() has reached its
+ // maximum possible value.
+ inline bool OutOfAddressableSpace() const {
+ return count_ == static_cast<size_t>(kSSizeMax - 1);
+ }
+
+ // Returns the number of bytes that would have been emitted to |buffer_|
+ // if it was sized sufficiently large. This number can be larger than
+ // |size_|, if the caller provided an insufficiently large output buffer.
+ // But it will never be bigger than |kSSizeMax-1|.
+ inline ssize_t GetCount() const {
+ DEBUG_CHECK(count_ < kSSizeMax);
+ return static_cast<ssize_t>(count_);
+ }
+
+ // Emits one |ch| character into the |buffer_| and updates the |count_| of
+ // characters that are currently supposed to be in the buffer.
+ // Returns "false", iff the buffer was already full.
+ // N.B. |count_| increases even if no characters have been written. This is
+ // needed so that GetCount() can return the number of bytes that should
+ // have been allocated for the |buffer_|.
+ inline bool Out(char ch) {
+ if (size_ >= 1 && count_ < size_) {
+ buffer_[count_] = ch;
+ return IncrementCountByOne();
+ }
+ // |count_| still needs to be updated, even if the buffer has been
+ // filled completely. This allows SafeSPrintf() to return the number of
+ // bytes that should have been emitted.
+ IncrementCountByOne();
+ return false;
+ }
+
+ // Inserts |padding|-|len| bytes worth of padding into the |buffer_|.
+ // |count_| will also be incremented by the number of bytes that were meant
+ // to be emitted. The |pad| character is typically either a ' ' space
+ // or a '0' zero, but other non-NUL values are legal.
+  // Returns "false", iff the |buffer_| filled up (i.e. |count_|
+ // overflowed |size_|) at any time during padding.
+ inline bool Pad(char pad, size_t padding, size_t len) {
+ DEBUG_CHECK(pad);
+ DEBUG_CHECK(padding <= kSSizeMax);
+ for (; padding > len; --padding) {
+ if (!Out(pad)) {
+ if (--padding) {
+ IncrementCount(padding-len);
+ }
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // POSIX doesn't define any async-signal-safe function for converting
+ // an integer to ASCII. Define our own version.
+ //
+ // This also gives us the ability to make the function a little more
+ // powerful and have it deal with |padding|, with truncation, and with
+ // predicting the length of the untruncated output.
+ //
+ // IToASCII() converts an integer |i| to ASCII.
+ //
+ // Unlike similar functions in the standard C library, it never appends a
+ // NUL character. This is left for the caller to do.
+ //
+ // While the function signature takes a signed int64_t, the code decides at
+ // run-time whether to treat the argument as signed (int64_t) or as unsigned
+ // (uint64_t) based on the value of |sign|.
+ //
+ // It supports |base|s 2 through 16. Only a |base| of 10 is allowed to have
+ // a |sign|. Otherwise, |i| is treated as unsigned.
+ //
+ // For bases larger than 10, |upcase| decides whether lower-case or upper-
+ // case letters should be used to designate digits greater than 10.
+ //
+ // Padding can be done with either '0' zeros or ' ' spaces. Padding has to
+ // be positive and will always be applied to the left of the output.
+ //
+ // Prepends a |prefix| to the number (e.g. "0x"). This prefix goes to
+ // the left of |padding|, if |pad| is '0'; and to the right of |padding|
+ // if |pad| is ' '.
+ //
+ // Returns "false", if the |buffer_| overflowed at any time.
+ bool IToASCII(bool sign, bool upcase, int64_t i, int base,
+ char pad, size_t padding, const char* prefix);
+
+ private:
+ // Increments |count_| by |inc| unless this would cause |count_| to
+ // overflow |kSSizeMax-1|. Returns "false", iff an overflow was detected;
+ // it then clamps |count_| to |kSSizeMax-1|.
+ inline bool IncrementCount(size_t inc) {
+ // "inc" is either 1 or a "padding" value. Padding is clamped at
+ // run-time to at most kSSizeMax-1. So, we know that "inc" is always in
+ // the range 1..kSSizeMax-1.
+ // This allows us to compute "kSSizeMax - 1 - inc" without incurring any
+ // integer overflows.
+ DEBUG_CHECK(inc <= kSSizeMax - 1);
+ if (count_ > kSSizeMax - 1 - inc) {
+ count_ = kSSizeMax - 1;
+ return false;
+ }
+ count_ += inc;
+ return true;
+ }
+
+ // Convenience method for the common case of incrementing |count_| by one.
+ inline bool IncrementCountByOne() {
+ return IncrementCount(1);
+ }
+
+ // Return the current insertion point into the buffer. This is typically
+ // at |buffer_| + |count_|, but could be before that if truncation
+ // happened. It always points to one byte past the last byte that was
+ // successfully placed into the |buffer_|.
+ inline char* GetInsertionPoint() const {
+ size_t idx = count_;
+ if (idx > size_) {
+ idx = size_;
+ }
+ return buffer_ + idx;
+ }
+
+ // User-provided buffer that will receive the fully formatted output string.
+ char* buffer_;
+
+ // Number of bytes that are available in the buffer excluding the trailing
+ // NUL byte that will be added by the destructor.
+ const size_t size_;
+
+ // Number of bytes that would have been emitted to the buffer, if the buffer
+ // was sufficiently big. This number always excludes the trailing NUL byte
+ // and it is guaranteed to never grow bigger than kSSizeMax-1.
+ size_t count_;
+
+ DISALLOW_COPY_AND_ASSIGN(Buffer);
+};
+
+
+bool Buffer::IToASCII(bool sign, bool upcase, int64_t i, int base,
+ char pad, size_t padding, const char* prefix) {
+ // Sanity check for parameters. None of these should ever fail, but see
+ // above for the rationale why we can't call CHECK().
+ DEBUG_CHECK(base >= 2);
+ DEBUG_CHECK(base <= 16);
+ DEBUG_CHECK(!sign || base == 10);
+ DEBUG_CHECK(pad == '0' || pad == ' ');
+ DEBUG_CHECK(padding <= kSSizeMax);
+ DEBUG_CHECK(!(sign && prefix && *prefix));
+
+ // Handle negative numbers, if the caller indicated that |i| should be
+ // treated as a signed number; otherwise treat |i| as unsigned (even if the
+ // MSB is set!)
+ // Details are tricky, because of limited data-types, but equivalent pseudo-
+ // code would look like:
+ // if (sign && i < 0)
+ // prefix = "-";
+ // num = abs(i);
+ int minint = 0;
+ uint64_t num;
+ if (sign && i < 0) {
+ prefix = "-";
+
+ // Turn our number positive.
+ if (i == std::numeric_limits<int64_t>::min()) {
+ // The most negative integer needs special treatment.
+ minint = 1;
+ num = static_cast<uint64_t>(-(i + 1));
+ } else {
+ // "Normal" negative numbers are easy.
+ num = static_cast<uint64_t>(-i);
+ }
+ } else {
+ num = static_cast<uint64_t>(i);
+ }
+
+ // If padding with '0' zero, emit the prefix or '-' character now. Otherwise,
+ // make the prefix accessible in reverse order, so that we can later output
+ // it right between padding and the number.
+ // We cannot choose the easier approach of just reversing the number, as that
+ // fails in situations where we need to truncate numbers that have padding
+ // and/or prefixes.
+ const char* reverse_prefix = nullptr;
+ if (prefix && *prefix) {
+ if (pad == '0') {
+ while (*prefix) {
+ if (padding) {
+ --padding;
+ }
+ Out(*prefix++);
+ }
+ prefix = nullptr;
+ } else {
+ for (reverse_prefix = prefix; *reverse_prefix; ++reverse_prefix) {
+ }
+ }
+ } else
+ prefix = nullptr;
+ const size_t prefix_length = reverse_prefix - prefix;
+
+ // Loop until we have converted the entire number. Output at least one
+ // character (i.e. '0').
+ size_t start = count_;
+ size_t discarded = 0;
+ bool started = false;
+ do {
+ // Make sure there is still enough space left in our output buffer.
+ if (count_ >= size_) {
+ if (start < size_) {
+ // It is rare that we need to output a partial number. But if asked
+ // to do so, we will still make sure we output the correct number of
+ // leading digits.
+ // Since we are generating the digits in reverse order, we actually
+ // have to discard digits in the order that we have already emitted
+ // them. This is essentially equivalent to:
+ // memmove(buffer_ + start, buffer_ + start + 1, size_ - start - 1)
+ for (char* move = buffer_ + start, *end = buffer_ + size_ - 1;
+ move < end;
+ ++move) {
+ *move = move[1];
+ }
+ ++discarded;
+ --count_;
+ } else if (count_ - size_ > 1) {
+ // Need to increment either |count_| or |discarded| to make progress.
+ // The latter is more efficient, as it eventually triggers fast
+ // handling of padding. But we have to ensure we don't accidentally
+ // change the overall state (i.e. switch the state-machine from
+ // discarding to non-discarding). |count_| needs to always stay
+ // bigger than |size_|.
+ --count_;
+ ++discarded;
+ }
+ }
+
+ // Output the next digit and (if necessary) compensate for the most
+ // negative integer needing special treatment. This works because,
+ // no matter the bit width of the integer, the lowest-most decimal
+ // integer always ends in 2, 4, 6, or 8.
+ if (!num && started) {
+ if (reverse_prefix > prefix) {
+ Out(*--reverse_prefix);
+ } else {
+ Out(pad);
+ }
+ } else {
+ started = true;
+ Out((upcase ? kUpCaseHexDigits : kDownCaseHexDigits)[num%base + minint]);
+ }
+
+ minint = 0;
+ num /= base;
+
+ // Add padding, if requested.
+ if (padding > 0) {
+ --padding;
+
+ // Performance optimization for when we are asked to output excessive
+ // padding, but our output buffer is limited in size. Even if we output
+ // a 64bit number in binary, we would never write more than 64 plus
+ // prefix non-padding characters. So, once this limit has been passed,
+ // any further state change can be computed arithmetically; we know that
+ // by this time, our entire final output consists of padding characters
+ // that have all already been output.
+ if (discarded > 8*sizeof(num) + prefix_length) {
+ IncrementCount(padding);
+ padding = 0;
+ }
+ }
+ } while (num || padding || (reverse_prefix > prefix));
+
+ // Conversion to ASCII actually resulted in the digits being in reverse
+ // order. We can't easily generate them in forward order, as we can't tell
+ // the number of characters needed until we are done converting.
+ // So, now, we reverse the string (except for the possible '-' sign).
+ char* front = buffer_ + start;
+ char* back = GetInsertionPoint();
+ while (--back > front) {
+ char ch = *back;
+ *back = *front;
+ *front++ = ch;
+ }
+
+ IncrementCount(discarded);
+ return !discarded;
+}
+
+} // anonymous namespace
+
+namespace internal {
+
+ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt, const Arg* args,
+ const size_t max_args) {
+ // Make sure that at least one NUL byte can be written, and that the buffer
+ // never overflows kSSizeMax. Not only does that use up most or all of the
+ // address space, it also would result in a return code that cannot be
+ // represented.
+ if (static_cast<ssize_t>(sz) < 1)
+ return -1;
+ sz = std::min(sz, kSSizeMax);
+
+ // Iterate over format string and interpret '%' arguments as they are
+ // encountered.
+ Buffer buffer(buf, sz);
+ size_t padding;
+ char pad;
+ for (unsigned int cur_arg = 0; *fmt && !buffer.OutOfAddressableSpace(); ) {
+ if (*fmt++ == '%') {
+ padding = 0;
+ pad = ' ';
+ char ch = *fmt++;
+ format_character_found:
+ switch (ch) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ // Found a width parameter. Convert to an integer value and store in
+ // "padding". If the leading digit is a zero, change the padding
+ // character from a space ' ' to a zero '0'.
+ pad = ch == '0' ? '0' : ' ';
+ for (;;) {
+ // The maximum allowed padding fills all the available address
+ // space and leaves just enough space to insert the trailing NUL.
+ const size_t max_padding = kSSizeMax - 1;
+ if (padding > max_padding/10 ||
+ 10*padding > max_padding - (ch - '0')) {
+ DEBUG_CHECK(padding <= max_padding/10 &&
+ 10*padding <= max_padding - (ch - '0'));
+ // Integer overflow detected. Skip the rest of the width until
+ // we find the format character, then do the normal error handling.
+ padding_overflow:
+ padding = max_padding;
+ while ((ch = *fmt++) >= '0' && ch <= '9') {
+ }
+ if (cur_arg < max_args) {
+ ++cur_arg;
+ }
+ goto fail_to_expand;
+ }
+ padding = 10*padding + ch - '0';
+ if (padding > max_padding) {
+ // This doesn't happen for "sane" values of kSSizeMax. But once
+ // kSSizeMax gets smaller than about 10, our earlier range checks
+ // are incomplete. Unittests do trigger this artificial corner
+ // case.
+ DEBUG_CHECK(padding <= max_padding);
+ goto padding_overflow;
+ }
+ ch = *fmt++;
+ if (ch < '0' || ch > '9') {
+ // Reached the end of the width parameter. This is where the format
+ // character is found.
+ goto format_character_found;
+ }
+ }
+ break;
+ case 'c': { // Output an ASCII character.
+ // Check that there are arguments left to be inserted.
+ if (cur_arg >= max_args) {
+ DEBUG_CHECK(cur_arg < max_args);
+ goto fail_to_expand;
+ }
+
+ // Check that the argument has the expected type.
+ const Arg& arg = args[cur_arg++];
+ if (arg.type != Arg::INT && arg.type != Arg::UINT) {
+ DEBUG_CHECK(arg.type == Arg::INT || arg.type == Arg::UINT);
+ goto fail_to_expand;
+ }
+
+ // Apply padding, if needed.
+ buffer.Pad(' ', padding, 1);
+
+ // Convert the argument to an ASCII character and output it.
+ char as_char = static_cast<char>(arg.integer.i);
+ if (!as_char) {
+ goto end_of_output_buffer;
+ }
+ buffer.Out(as_char);
+ break; }
+ case 'd': // Output a possibly signed decimal value.
+ case 'o': // Output an unsigned octal value.
+ case 'x': // Output an unsigned hexadecimal value.
+ case 'X':
+ case 'p': { // Output a pointer value.
+ // Check that there are arguments left to be inserted.
+ if (cur_arg >= max_args) {
+ DEBUG_CHECK(cur_arg < max_args);
+ goto fail_to_expand;
+ }
+
+ const Arg& arg = args[cur_arg++];
+ int64_t i;
+ const char* prefix = nullptr;
+ if (ch != 'p') {
+ // Check that the argument has the expected type.
+ if (arg.type != Arg::INT && arg.type != Arg::UINT) {
+ DEBUG_CHECK(arg.type == Arg::INT || arg.type == Arg::UINT);
+ goto fail_to_expand;
+ }
+ i = arg.integer.i;
+
+ if (ch != 'd') {
+ // The Arg() constructor automatically performed sign expansion on
+ // signed parameters. This is great when outputting a %d decimal
+ // number, but can result in unexpected leading 0xFF bytes when
+ // outputting a %x hexadecimal number. Mask bits, if necessary.
+ // We have to do this here, instead of in the Arg() constructor, as
+ // the Arg() constructor cannot tell whether we will output a %d
+ // or a %x. Only the latter should experience masking.
+ if (arg.integer.width < sizeof(int64_t)) {
+ i &= (1LL << (8*arg.integer.width)) - 1;
+ }
+ }
+ } else {
+ // Pointer values require an actual pointer or a string.
+ if (arg.type == Arg::POINTER) {
+ i = reinterpret_cast<uintptr_t>(arg.ptr);
+ } else if (arg.type == Arg::STRING) {
+ i = reinterpret_cast<uintptr_t>(arg.str);
+ } else if (arg.type == Arg::INT &&
+ arg.integer.width == sizeof(NULL) &&
+ arg.integer.i == 0) { // Allow C++'s version of NULL
+ i = 0;
+ } else {
+ DEBUG_CHECK(arg.type == Arg::POINTER || arg.type == Arg::STRING);
+ goto fail_to_expand;
+ }
+
+ // Pointers always include the "0x" prefix.
+ prefix = "0x";
+ }
+
+ // Use IToASCII() to convert to ASCII representation. For decimal
+ // numbers, optionally print a sign. For hexadecimal numbers,
+ // distinguish between upper and lower case. %p addresses are always
+ // printed as upcase. Supports base 8, 10, and 16. Prints padding
+ // and/or prefixes, if so requested.
+ buffer.IToASCII(ch == 'd' && arg.type == Arg::INT,
+ ch != 'x', i,
+ ch == 'o' ? 8 : ch == 'd' ? 10 : 16,
+ pad, padding, prefix);
+ break; }
+ case 's': {
+ // Check that there are arguments left to be inserted.
+ if (cur_arg >= max_args) {
+ DEBUG_CHECK(cur_arg < max_args);
+ goto fail_to_expand;
+ }
+
+ // Check that the argument has the expected type.
+ const Arg& arg = args[cur_arg++];
+ const char *s;
+ if (arg.type == Arg::STRING) {
+ s = arg.str ? arg.str : "<NULL>";
+ } else if (arg.type == Arg::INT && arg.integer.width == sizeof(NULL) &&
+ arg.integer.i == 0) { // Allow C++'s version of NULL
+ s = "<NULL>";
+ } else {
+ DEBUG_CHECK(arg.type == Arg::STRING);
+ goto fail_to_expand;
+ }
+
+ // Apply padding, if needed. This requires us to first check the
+ // length of the string that we are outputting.
+ if (padding) {
+ size_t len = 0;
+ for (const char* src = s; *src++; ) {
+ ++len;
+ }
+ buffer.Pad(' ', padding, len);
+ }
+
+ // Printing a string involves nothing more than copying it into the
+ // output buffer and making sure we don't output more bytes than
+ // available space; Out() takes care of doing that.
+ for (const char* src = s; *src; ) {
+ buffer.Out(*src++);
+ }
+ break; }
+ case '%':
+ // Quoted percent '%' character.
+ goto copy_verbatim;
+ fail_to_expand:
+ // C++ gives us tools to do type checking -- something that snprintf()
+ // could never really do. So, whenever we see arguments that don't
+ // match up with the format string, we refuse to output them. But
+ // since we have to be extremely conservative about being async-
+ // signal-safe, we are limited in the type of error handling that we
+ // can do in production builds (in debug builds we can use
+ // DEBUG_CHECK() and hope for the best). So, all we do is pass the
+ // format string unchanged. That should eventually get the user's
+ // attention; and in the meantime, it hopefully doesn't lose too much
+ // data.
+ default:
+ // Unknown or unsupported format character. Just copy verbatim to
+ // output.
+ buffer.Out('%');
+ DEBUG_CHECK(ch);
+ if (!ch) {
+ goto end_of_format_string;
+ }
+ buffer.Out(ch);
+ break;
+ }
+ } else {
+ copy_verbatim:
+ buffer.Out(fmt[-1]);
+ }
+ }
+ end_of_format_string:
+ end_of_output_buffer:
+ return buffer.GetCount();
+}
+
+} // namespace internal
+
+ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt) {
+ // Make sure that at least one NUL byte can be written, and that the buffer
+ // never overflows kSSizeMax. Not only does that use up most or all of the
+ // address space, it also would result in a return code that cannot be
+ // represented.
+ if (static_cast<ssize_t>(sz) < 1)
+ return -1;
+ sz = std::min(sz, kSSizeMax);
+
+ Buffer buffer(buf, sz);
+
+ // In the slow-path, we deal with errors by copying the contents of
+ // "fmt" unexpanded. This means, if there are no arguments passed, the
+ // SafeSPrintf() function always degenerates to a version of strncpy() that
+ // de-duplicates '%' characters.
+ const char* src = fmt;
+ for (; *src; ++src) {
+ buffer.Out(*src);
+ DEBUG_CHECK(src[0] != '%' || src[1] == '%');
+ if (src[0] == '%' && src[1] == '%') {
+ ++src;
+ }
+ }
+ return buffer.GetCount();
+}
+
+} // namespace strings
+} // namespace base
diff --git a/security/sandbox/chromium/base/strings/safe_sprintf.h b/security/sandbox/chromium/base/strings/safe_sprintf.h
new file mode 100644
index 0000000000..01d649d07a
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/safe_sprintf.h
@@ -0,0 +1,246 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_SAFE_SPRINTF_H_
+#define BASE_STRINGS_SAFE_SPRINTF_H_
+
+#include "build/build_config.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+// For ssize_t
+#include <unistd.h>
+#endif
+
+#include "base/base_export.h"
+
+namespace base {
+namespace strings {
+
+#if defined(COMPILER_MSVC)
+// Define ssize_t inside of our namespace.
+#if defined(_WIN64)
+typedef __int64 ssize_t;
+#else
+typedef long ssize_t;
+#endif
+#endif
+
+// SafeSPrintf() is a type-safe and completely self-contained version of
+// snprintf().
+//
+// SafeSNPrintf() is an alternative function signature that can be used when
+// not dealing with fixed-sized buffers. When possible, SafeSPrintf() should
+// always be used instead of SafeSNPrintf()
+//
+// These functions allow for formatting complicated messages from contexts that
+// require strict async-signal-safety. In fact, it is safe to call them from
+// any low-level execution context, as they are guaranteed to make no library
+// or system calls. It deliberately never touches "errno", either.
+//
+// The only exception to this rule is that in debug builds the code calls
+// RAW_CHECK() to help diagnose problems when the format string does not
+// match the rest of the arguments. In release builds, no CHECK()s are used,
+// and SafeSPrintf() instead returns an output string that expands only
+// those arguments that match their format characters. Mismatched arguments
+// are ignored.
+//
+// The code currently only supports a subset of format characters:
+// %c, %o, %d, %x, %X, %p, and %s.
+//
+// SafeSPrintf() aims to be as liberal as reasonably possible. Integer-like
+// values of arbitrary width can be passed to all of the format characters
+// that expect integers. Thus, it is explicitly legal to pass an "int" to
+// "%c", and output will automatically look at the LSB only. It is also
+// explicitly legal to pass either signed or unsigned values, and the format
+// characters will automatically interpret the arguments accordingly.
+//
+// It is still not legal to mix-and-match integer-like values with pointer
+// values. For instance, you cannot pass a pointer to %x, nor can you pass an
+// integer to %p.
+//
+// The one exception is "0" zero being accepted by "%p". This works around
+// the problem of C++ defining NULL as an integer-like value.
+//
+// All format characters take an optional width parameter. This must be a
+// positive integer. For %d, %o, %x, %X and %p, if the width starts with
+// a leading '0', padding is done with '0' instead of ' ' characters.
+//
+// There are a few features of snprintf()-style format strings, that
+// SafeSPrintf() does not support at this time.
+//
+// If an actual user showed up, there is no particularly strong reason they
+// couldn't be added. But that assumes that the trade-offs between complexity
+// and utility are favorable.
+//
+// For example, adding support for negative padding widths, and for %n are all
+// likely to be viewed positively. They are all clearly useful, low-risk, easy
+// to test, don't jeopardize the async-signal-safety of the code, and overall
+// have little impact on other parts of SafeSPrintf() function.
+//
+// On the other hands, adding support for alternate forms, positional
+// arguments, grouping, wide characters, localization or floating point numbers
+// are all unlikely to ever be added.
+//
+// SafeSPrintf() and SafeSNPrintf() mimic the behavior of snprintf() and they
+// return the number of bytes needed to store the untruncated output. This
+// does *not* include the terminating NUL byte.
+//
+// They return -1, iff a fatal error happened. This typically can only happen,
+// if the buffer size is a) negative, or b) zero (i.e. not even the NUL byte
+// can be written). The return value can never be larger than SSIZE_MAX-1.
+// This ensures that the caller can always add one to the signed return code
+// in order to determine the amount of storage that needs to be allocated.
+//
+// While the code supports type checking and while it is generally very careful
+// to avoid printing incorrect values, it tends to be conservative in printing
+// as much as possible, even when given incorrect parameters. Typically, in
+// case of an error, the format string will not be expanded. (i.e. something
+// like SafeSPrintf(buf, "%p %d", 1, 2) results in "%p 2"). See above for
+// the use of RAW_CHECK() in debug builds, though.
+//
+// Basic example:
+// char buf[20];
+// base::strings::SafeSPrintf(buf, "The answer: %2d", 42);
+//
+// Example with dynamically sized buffer (async-signal-safe). This code won't
+// work on Visual studio, as it requires dynamically allocating arrays on the
+// stack. Consider picking a smaller value for |kMaxSize| if stack size is
+// limited and known. On the other hand, if the parameters to SafeSNPrintf()
+// are trusted and not controllable by the user, you can consider eliminating
+// the check for |kMaxSize| altogether. The current value of SSIZE_MAX is
+// essentially a no-op that just illustrates how to implement an upper bound:
+// const size_t kInitialSize = 128;
+// const size_t kMaxSize = std::numeric_limits<ssize_t>::max();
+// size_t size = kInitialSize;
+// for (;;) {
+// char buf[size];
+// size = SafeSNPrintf(buf, size, "Error message \"%s\"\n", err) + 1;
+// if (sizeof(buf) < kMaxSize && size > kMaxSize) {
+// size = kMaxSize;
+// continue;
+// } else if (size > sizeof(buf))
+// continue;
+// write(2, buf, size-1);
+// break;
+// }
+
+namespace internal {
+// Helpers that use C++ overloading, templates, and specializations to deduce
+// and record type information from function arguments. This allows us to
+// later write a type-safe version of snprintf().
+
+struct Arg {
+ enum Type { INT, UINT, STRING, POINTER };
+
+ // Any integer-like value.
+ Arg(signed char c) : type(INT) {
+ integer.i = c;
+ integer.width = sizeof(char);
+ }
+ Arg(unsigned char c) : type(UINT) {
+ integer.i = c;
+ integer.width = sizeof(char);
+ }
+ Arg(signed short j) : type(INT) {
+ integer.i = j;
+ integer.width = sizeof(short);
+ }
+ Arg(unsigned short j) : type(UINT) {
+ integer.i = j;
+ integer.width = sizeof(short);
+ }
+ Arg(signed int j) : type(INT) {
+ integer.i = j;
+ integer.width = sizeof(int);
+ }
+ Arg(unsigned int j) : type(UINT) {
+ integer.i = j;
+ integer.width = sizeof(int);
+ }
+ Arg(signed long j) : type(INT) {
+ integer.i = j;
+ integer.width = sizeof(long);
+ }
+ Arg(unsigned long j) : type(UINT) {
+ integer.i = j;
+ integer.width = sizeof(long);
+ }
+ Arg(signed long long j) : type(INT) {
+ integer.i = j;
+ integer.width = sizeof(long long);
+ }
+ Arg(unsigned long long j) : type(UINT) {
+ integer.i = j;
+ integer.width = sizeof(long long);
+ }
+
+ // A C-style text string.
+ Arg(const char* s) : str(s), type(STRING) { }
+ Arg(char* s) : str(s), type(STRING) { }
+
+ // Any pointer value that can be cast to a "void*".
+ template<class T> Arg(T* p) : ptr((void*)p), type(POINTER) { }
+
+ union {
+ // An integer-like value.
+ struct {
+ int64_t i;
+ unsigned char width;
+ } integer;
+
+ // A C-style text string.
+ const char* str;
+
+ // A pointer to an arbitrary object.
+ const void* ptr;
+ };
+ const enum Type type;
+};
+
+// This is the internal function that performs the actual formatting of
+// an snprintf()-style format string.
+BASE_EXPORT ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt,
+ const Arg* args, size_t max_args);
+
+#if !defined(NDEBUG)
+// In debug builds, allow unit tests to artificially lower the kSSizeMax
+// constant that is used as a hard upper-bound for all buffers. In normal
+// use, this constant should always be std::numeric_limits<ssize_t>::max().
+BASE_EXPORT void SetSafeSPrintfSSizeMaxForTest(size_t max);
+BASE_EXPORT size_t GetSafeSPrintfSSizeMaxForTest();
+#endif
+
+} // namespace internal
+
+template<typename... Args>
+ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt, Args... args) {
+ // Use Arg() object to record type information and then copy arguments to an
+ // array to make it easier to iterate over them.
+ const internal::Arg arg_array[] = { args... };
+ return internal::SafeSNPrintf(buf, N, fmt, arg_array, sizeof...(args));
+}
+
+template<size_t N, typename... Args>
+ssize_t SafeSPrintf(char (&buf)[N], const char* fmt, Args... args) {
+ // Use Arg() object to record type information and then copy arguments to an
+ // array to make it easier to iterate over them.
+ const internal::Arg arg_array[] = { args... };
+ return internal::SafeSNPrintf(buf, N, fmt, arg_array, sizeof...(args));
+}
+
+// Fast-path when we don't actually need to substitute any arguments.
+BASE_EXPORT ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt);
+template<size_t N>
+inline ssize_t SafeSPrintf(char (&buf)[N], const char* fmt) {
+ return SafeSNPrintf(buf, N, fmt);
+}
+
+} // namespace strings
+} // namespace base
+
+#endif // BASE_STRINGS_SAFE_SPRINTF_H_
diff --git a/security/sandbox/chromium/base/strings/safe_sprintf_unittest.cc b/security/sandbox/chromium/base/strings/safe_sprintf_unittest.cc
new file mode 100644
index 0000000000..bb9908f928
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/safe_sprintf_unittest.cc
@@ -0,0 +1,765 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/safe_sprintf.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <limits>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Death tests on Android are currently very flaky. No need to add more flaky
+// tests, as they just make it hard to spot real problems.
+// TODO(markus): See if the restrictions on Android can eventually be lifted.
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+#define ALLOW_DEATH_TEST
+#endif
+
+namespace base {
+namespace strings {
+
+TEST(SafeSPrintfTest, Empty) {
+ char buf[2] = { 'X', 'X' };
+
+ // Negative buffer size should always result in an error.
+ EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), ""));
+ EXPECT_EQ('X', buf[0]);
+ EXPECT_EQ('X', buf[1]);
+
+ // Zero buffer size should always result in an error.
+ EXPECT_EQ(-1, SafeSNPrintf(buf, 0, ""));
+ EXPECT_EQ('X', buf[0]);
+ EXPECT_EQ('X', buf[1]);
+
+ // A one-byte buffer should always print a single NUL byte.
+ EXPECT_EQ(0, SafeSNPrintf(buf, 1, ""));
+ EXPECT_EQ(0, buf[0]);
+ EXPECT_EQ('X', buf[1]);
+ buf[0] = 'X';
+
+ // A larger buffer should leave the trailing bytes unchanged.
+ EXPECT_EQ(0, SafeSNPrintf(buf, 2, ""));
+ EXPECT_EQ(0, buf[0]);
+ EXPECT_EQ('X', buf[1]);
+ buf[0] = 'X';
+
+ // The same test using SafeSPrintf() instead of SafeSNPrintf().
+ EXPECT_EQ(0, SafeSPrintf(buf, ""));
+ EXPECT_EQ(0, buf[0]);
+ EXPECT_EQ('X', buf[1]);
+ buf[0] = 'X';
+}
+
+TEST(SafeSPrintfTest, NoArguments) {
+ // Output a text message that doesn't require any substitutions. This
+ // is roughly equivalent to calling strncpy() (but unlike strncpy(), it does
+ // always add a trailing NUL; it always deduplicates '%' characters).
+ static const char text[] = "hello world";
+ char ref[20], buf[20];
+ memset(ref, 'X', sizeof(ref));
+ memcpy(buf, ref, sizeof(buf));
+
+ // A negative buffer size should always result in an error.
+ EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), text));
+ EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+ // Zero buffer size should always result in an error.
+ EXPECT_EQ(-1, SafeSNPrintf(buf, 0, text));
+ EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+ // A one-byte buffer should always print a single NUL byte.
+ EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSNPrintf(buf, 1, text));
+ EXPECT_EQ(0, buf[0]);
+ EXPECT_TRUE(!memcmp(buf+1, ref+1, sizeof(buf)-1));
+ memcpy(buf, ref, sizeof(buf));
+
+ // A larger (but limited) buffer should always leave the trailing bytes
+ // unchanged.
+ EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSNPrintf(buf, 2, text));
+ EXPECT_EQ(text[0], buf[0]);
+ EXPECT_EQ(0, buf[1]);
+ EXPECT_TRUE(!memcmp(buf+2, ref+2, sizeof(buf)-2));
+ memcpy(buf, ref, sizeof(buf));
+
+  // An unrestricted buffer length should always leave the trailing bytes
+ // unchanged.
+ EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+ SafeSNPrintf(buf, sizeof(buf), text));
+ EXPECT_EQ(std::string(text), std::string(buf));
+ EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+ sizeof(buf) - sizeof(text)));
+ memcpy(buf, ref, sizeof(buf));
+
+ // The same test using SafeSPrintf() instead of SafeSNPrintf().
+ EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSPrintf(buf, text));
+ EXPECT_EQ(std::string(text), std::string(buf));
+ EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+ sizeof(buf) - sizeof(text)));
+ memcpy(buf, ref, sizeof(buf));
+
+ // Check for deduplication of '%' percent characters.
+ EXPECT_EQ(1, SafeSPrintf(buf, "%%"));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%%%%"));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%%X"));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%%%%X"));
+#if defined(NDEBUG)
+ EXPECT_EQ(1, SafeSPrintf(buf, "%"));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%X"));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%%%X"));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, "%"), "src.1. == '%'");
+ EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+ EXPECT_DEATH(SafeSPrintf(buf, "%X"), "src.1. == '%'");
+ EXPECT_DEATH(SafeSPrintf(buf, "%%%X"), "src.1. == '%'");
+#endif
+}
+
+TEST(SafeSPrintfTest, OneArgument) {
+ // Test basic single-argument single-character substitution.
+ const char text[] = "hello world";
+ const char fmt[] = "hello%cworld";
+ char ref[20], buf[20];
+ memset(ref, 'X', sizeof(buf));
+ memcpy(buf, ref, sizeof(buf));
+
+ // A negative buffer size should always result in an error.
+ EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), fmt, ' '));
+ EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+ // Zero buffer size should always result in an error.
+ EXPECT_EQ(-1, SafeSNPrintf(buf, 0, fmt, ' '));
+ EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+ // A one-byte buffer should always print a single NUL byte.
+ EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+ SafeSNPrintf(buf, 1, fmt, ' '));
+ EXPECT_EQ(0, buf[0]);
+ EXPECT_TRUE(!memcmp(buf+1, ref+1, sizeof(buf)-1));
+ memcpy(buf, ref, sizeof(buf));
+
+ // A larger (but limited) buffer should always leave the trailing bytes
+ // unchanged.
+ EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+ SafeSNPrintf(buf, 2, fmt, ' '));
+ EXPECT_EQ(text[0], buf[0]);
+ EXPECT_EQ(0, buf[1]);
+ EXPECT_TRUE(!memcmp(buf+2, ref+2, sizeof(buf)-2));
+ memcpy(buf, ref, sizeof(buf));
+
+  // An unrestricted buffer length should always leave the trailing bytes
+ // unchanged.
+ EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+ SafeSNPrintf(buf, sizeof(buf), fmt, ' '));
+ EXPECT_EQ(std::string(text), std::string(buf));
+ EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+ sizeof(buf) - sizeof(text)));
+ memcpy(buf, ref, sizeof(buf));
+
+ // The same test using SafeSPrintf() instead of SafeSNPrintf().
+ EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSPrintf(buf, fmt, ' '));
+ EXPECT_EQ(std::string(text), std::string(buf));
+ EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+ sizeof(buf) - sizeof(text)));
+ memcpy(buf, ref, sizeof(buf));
+
+ // Check for deduplication of '%' percent characters.
+ EXPECT_EQ(1, SafeSPrintf(buf, "%%", 0));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%%%%", 0));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%Y", 0));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%%Y", 0));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%%%Y", 0));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%%%%Y", 0));
+#if defined(NDEBUG)
+ EXPECT_EQ(1, SafeSPrintf(buf, "%", 0));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, "%", 0), "ch");
+ EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+TEST(SafeSPrintfTest, MissingArg) {
+#if defined(NDEBUG)
+ char buf[20];
+ EXPECT_EQ(3, SafeSPrintf(buf, "%c%c", 'A'));
+ EXPECT_EQ("A%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ char buf[20];
+ EXPECT_DEATH(SafeSPrintf(buf, "%c%c", 'A'), "cur_arg < max_args");
+#endif
+}
+
+TEST(SafeSPrintfTest, ASANFriendlyBufferTest) {
+ // Print into a buffer that is sized exactly to size. ASAN can verify that
+ // nobody attempts to write past the end of the buffer.
+ // There is a more complicated test in PrintLongString() that covers a lot
+  // more edge cases, but it is also harder to debug in case of a failure.
+ const char kTestString[] = "This is a test";
+ std::unique_ptr<char[]> buf(new char[sizeof(kTestString)]);
+ EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+ SafeSNPrintf(buf.get(), sizeof(kTestString), kTestString));
+ EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+ EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+ SafeSNPrintf(buf.get(), sizeof(kTestString), "%s", kTestString));
+ EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+}
+
+TEST(SafeSPrintfTest, NArgs) {
+ // Pre-C++11 compilers have a different code path, that can only print
+ // up to ten distinct arguments.
+ // We test both SafeSPrintf() and SafeSNPrintf(). This makes sure we don't
+ // have typos in the copy-n-pasted code that is needed to deal with various
+ // numbers of arguments.
+ char buf[12];
+ EXPECT_EQ(1, SafeSPrintf(buf, "%c", 1));
+ EXPECT_EQ("\1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%c%c", 1, 2));
+ EXPECT_EQ("\1\2", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%c%c%c", 1, 2, 3));
+ EXPECT_EQ("\1\2\3", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%c%c%c%c", 1, 2, 3, 4));
+ EXPECT_EQ("\1\2\3\4", std::string(buf));
+ EXPECT_EQ(5, SafeSPrintf(buf, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+ EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+ EXPECT_EQ(6, SafeSPrintf(buf, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+ EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+ EXPECT_EQ(7, SafeSPrintf(buf, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+ EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+ EXPECT_EQ(8, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8));
+ EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+ EXPECT_EQ(9, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c",
+ 1, 2, 3, 4, 5, 6, 7, 8, 9));
+ EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+ EXPECT_EQ(10, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c",
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+
+ // Repeat all the tests with SafeSNPrintf() instead of SafeSPrintf().
+ EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+ EXPECT_EQ(1, SafeSNPrintf(buf, 11, "%c", 1));
+ EXPECT_EQ("\1", std::string(buf));
+ EXPECT_EQ(2, SafeSNPrintf(buf, 11, "%c%c", 1, 2));
+ EXPECT_EQ("\1\2", std::string(buf));
+ EXPECT_EQ(3, SafeSNPrintf(buf, 11, "%c%c%c", 1, 2, 3));
+ EXPECT_EQ("\1\2\3", std::string(buf));
+ EXPECT_EQ(4, SafeSNPrintf(buf, 11, "%c%c%c%c", 1, 2, 3, 4));
+ EXPECT_EQ("\1\2\3\4", std::string(buf));
+ EXPECT_EQ(5, SafeSNPrintf(buf, 11, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+ EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+ EXPECT_EQ(6, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+ EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+ EXPECT_EQ(7, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+ EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+ EXPECT_EQ(8, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c",
+ 1, 2, 3, 4, 5, 6, 7, 8));
+ EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+ EXPECT_EQ(9, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c",
+ 1, 2, 3, 4, 5, 6, 7, 8, 9));
+ EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+ EXPECT_EQ(10, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c%c",
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+ EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+
+ EXPECT_EQ(11, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c%c",
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11));
+ EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+ EXPECT_EQ(11, SafeSNPrintf(buf, 12, "%c%c%c%c%c%c%c%c%c%c%c",
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11));
+ EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+}
+
+TEST(SafeSPrintfTest, DataTypes) {
+ char buf[40];
+
+ // Bytes
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint8_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%d", (uint8_t)-1));
+ EXPECT_EQ("255", std::string(buf));
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int8_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int8_t)-1));
+ EXPECT_EQ("-1", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%d", (int8_t)-128));
+ EXPECT_EQ("-128", std::string(buf));
+
+ // Half-words
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint16_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(5, SafeSPrintf(buf, "%d", (uint16_t)-1));
+ EXPECT_EQ("65535", std::string(buf));
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int16_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int16_t)-1));
+ EXPECT_EQ("-1", std::string(buf));
+ EXPECT_EQ(6, SafeSPrintf(buf, "%d", (int16_t)-32768));
+ EXPECT_EQ("-32768", std::string(buf));
+
+ // Words
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint32_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(10, SafeSPrintf(buf, "%d", (uint32_t)-1));
+ EXPECT_EQ("4294967295", std::string(buf));
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int32_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int32_t)-1));
+ EXPECT_EQ("-1", std::string(buf));
+  // Work-around for a limitation of C90
+ EXPECT_EQ(11, SafeSPrintf(buf, "%d", (int32_t)-2147483647-1));
+ EXPECT_EQ("-2147483648", std::string(buf));
+
+ // Quads
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint64_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(20, SafeSPrintf(buf, "%d", (uint64_t)-1));
+ EXPECT_EQ("18446744073709551615", std::string(buf));
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int64_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int64_t)-1));
+ EXPECT_EQ("-1", std::string(buf));
+  // Work-around for a limitation of C90
+ EXPECT_EQ(20, SafeSPrintf(buf, "%d", (int64_t)-9223372036854775807LL-1));
+ EXPECT_EQ("-9223372036854775808", std::string(buf));
+
+ // Strings (both const and mutable).
+ EXPECT_EQ(4, SafeSPrintf(buf, "test"));
+ EXPECT_EQ("test", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, buf));
+ EXPECT_EQ("test", std::string(buf));
+
+ // Pointer
+ char addr[20];
+ sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)buf);
+ SafeSPrintf(buf, "%p", buf);
+ EXPECT_EQ(std::string(addr), std::string(buf));
+ SafeSPrintf(buf, "%p", (const char *)buf);
+ EXPECT_EQ(std::string(addr), std::string(buf));
+ sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)sprintf);
+ SafeSPrintf(buf, "%p", sprintf);
+ EXPECT_EQ(std::string(addr), std::string(buf));
+
+ // Padding for pointers is a little more complicated because of the "0x"
+ // prefix. Padding with '0' zeros is relatively straight-forward, but
+ // padding with ' ' spaces requires more effort.
+ sprintf(addr, "0x%017llX", (unsigned long long)(uintptr_t)buf);
+ SafeSPrintf(buf, "%019p", buf);
+ EXPECT_EQ(std::string(addr), std::string(buf));
+ sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)buf);
+ memset(addr, ' ',
+ (char*)memmove(addr + sizeof(addr) - strlen(addr) - 1,
+ addr, strlen(addr)+1) - addr);
+ SafeSPrintf(buf, "%19p", buf);
+ EXPECT_EQ(std::string(addr), std::string(buf));
+}
+
+namespace {
+void PrintLongString(char* buf, size_t sz) {
+ // Output a reasonably complex expression into a limited-size buffer.
+ // At least one byte is available for writing the NUL character.
+ CHECK_GT(sz, static_cast<size_t>(0));
+
+ // Allocate slightly more space, so that we can verify that SafeSPrintf()
+ // never writes past the end of the buffer.
+ std::unique_ptr<char[]> tmp(new char[sz + 2]);
+ memset(tmp.get(), 'X', sz+2);
+
+ // Use SafeSPrintf() to output a complex list of arguments:
+ // - test padding and truncating %c single characters.
+ // - test truncating %s simple strings.
+ // - test mismatching arguments and truncating (for %d != %s).
+ // - test zero-padding and truncating %x hexadecimal numbers.
+ // - test outputting and truncating %d MININT.
+ // - test outputting and truncating %p arbitrary pointer values.
+ // - test outputting, padding and truncating NULL-pointer %s strings.
+ char* out = tmp.get();
+ size_t out_sz = sz;
+ size_t len;
+ for (std::unique_ptr<char[]> perfect_buf;;) {
+ size_t needed =
+ SafeSNPrintf(out, out_sz,
+#if defined(NDEBUG)
+ "A%2cong %s: %d %010X %d %p%7s", 'l', "string", "",
+#else
+ "A%2cong %s: %%d %010X %d %p%7s", 'l', "string",
+#endif
+ 0xDEADBEEF, std::numeric_limits<intptr_t>::min(),
+ PrintLongString, static_cast<char*>(nullptr)) +
+ 1;
+
+ // Various sanity checks:
+  // The number of characters needed to print the full string should always
+ // be bigger or equal to the bytes that have actually been output.
+ len = strlen(tmp.get());
+ CHECK_GE(needed, len+1);
+
+ // The number of characters output should always fit into the buffer that
+ // was passed into SafeSPrintf().
+ CHECK_LT(len, out_sz);
+
+ // The output is always terminated with a NUL byte (actually, this test is
+ // always going to pass, as strlen() already verified this)
+ EXPECT_FALSE(tmp[len]);
+
+ // ASAN can check that we are not overwriting buffers, iff we make sure the
+ // buffer is exactly the size that we are expecting to be written. After
+ // running SafeSNPrintf() the first time, it is possible to compute the
+ // correct buffer size for this test. So, allocate a second buffer and run
+ // the exact same SafeSNPrintf() command again.
+ if (!perfect_buf.get()) {
+ out_sz = std::min(needed, sz);
+ out = new char[out_sz];
+ perfect_buf.reset(out);
+ } else {
+ break;
+ }
+ }
+
+ // All trailing bytes are unchanged.
+ for (size_t i = len+1; i < sz+2; ++i)
+ EXPECT_EQ('X', tmp[i]);
+
+ // The text that was generated by SafeSPrintf() should always match the
+ // equivalent text generated by sprintf(). Please note that the format
+ // string for sprintf() is not complicated, as it does not have the
+ // benefit of getting type information from the C++ compiler.
+ //
+ // N.B.: It would be so much cleaner to use snprintf(). But unfortunately,
+ // Visual Studio doesn't support this function, and the work-arounds
+ // are all really awkward.
+ char ref[256];
+ CHECK_LE(sz, sizeof(ref));
+ sprintf(ref, "A long string: %%d 00DEADBEEF %lld 0x%llX <NULL>",
+ static_cast<long long>(std::numeric_limits<intptr_t>::min()),
+ static_cast<unsigned long long>(
+ reinterpret_cast<uintptr_t>(PrintLongString)));
+ ref[sz-1] = '\000';
+
+#if defined(NDEBUG)
+ const size_t kSSizeMax = std::numeric_limits<ssize_t>::max();
+#else
+ const size_t kSSizeMax = internal::GetSafeSPrintfSSizeMaxForTest();
+#endif
+
+ // Compare the output from SafeSPrintf() to the one from sprintf().
+ EXPECT_EQ(std::string(ref).substr(0, kSSizeMax-1), std::string(tmp.get()));
+
+ // We allocated a slightly larger buffer, so that we could perform some
+ // extra sanity checks. Now that the tests have all passed, we copy the
+ // data to the output buffer that the caller provided.
+ memcpy(buf, tmp.get(), len+1);
+}
+
+#if !defined(NDEBUG)
+class ScopedSafeSPrintfSSizeMaxSetter {
+ public:
+ ScopedSafeSPrintfSSizeMaxSetter(size_t sz) {
+ old_ssize_max_ = internal::GetSafeSPrintfSSizeMaxForTest();
+ internal::SetSafeSPrintfSSizeMaxForTest(sz);
+ }
+
+ ~ScopedSafeSPrintfSSizeMaxSetter() {
+ internal::SetSafeSPrintfSSizeMaxForTest(old_ssize_max_);
+ }
+
+ private:
+ size_t old_ssize_max_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSafeSPrintfSSizeMaxSetter);
+};
+#endif
+
+} // anonymous namespace
+
+TEST(SafeSPrintfTest, Truncation) {
+ // We use PrintLongString() to print a complex long string and then
+ // truncate to all possible lengths. This ends up exercising a lot of
+ // different code paths in SafeSPrintf() and IToASCII(), as truncation can
+ // happen in a lot of different states.
+ char ref[256];
+ PrintLongString(ref, sizeof(ref));
+ for (size_t i = strlen(ref)+1; i; --i) {
+ char buf[sizeof(ref)];
+ PrintLongString(buf, i);
+ EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+ }
+
+ // When compiling in debug mode, we have the ability to fake a small
+ // upper limit for the maximum value that can be stored in an ssize_t.
+ // SafeSPrintf() uses this upper limit to determine how many bytes it will
+ // write to the buffer, even if the caller claimed a bigger buffer size.
+ // Repeat the truncation test and verify that this other code path in
+ // SafeSPrintf() works correctly, too.
+#if !defined(NDEBUG)
+ for (size_t i = strlen(ref)+1; i > 1; --i) {
+ ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(i);
+ char buf[sizeof(ref)];
+ PrintLongString(buf, sizeof(buf));
+ EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+ }
+
+ // kSSizeMax is also used to constrain the maximum amount of padding, before
+ // SafeSPrintf() detects an error in the format string.
+ ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(100);
+ char buf[256];
+ EXPECT_EQ(99, SafeSPrintf(buf, "%99c", ' '));
+ EXPECT_EQ(std::string(99, ' '), std::string(buf));
+ *buf = '\000';
+#if defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, "%100c", ' '), "padding <= max_padding");
+#endif
+ EXPECT_EQ(0, *buf);
+#endif
+}
+
+TEST(SafeSPrintfTest, Padding) {
+ char buf[40], fmt[40];
+
+ // Chars %c
+ EXPECT_EQ(1, SafeSPrintf(buf, "%c", 'A'));
+ EXPECT_EQ("A", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%2c", 'A'));
+ EXPECT_EQ(" A", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%02c", 'A'));
+ EXPECT_EQ(" A", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2c", 'A'));
+ EXPECT_EQ("%-2c", std::string(buf));
+ SafeSPrintf(fmt, "%%%dc", std::numeric_limits<ssize_t>::max() - 1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1, SafeSPrintf(buf, fmt, 'A'));
+ SafeSPrintf(fmt, "%%%dc",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, 'A'));
+ EXPECT_EQ("%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, 'A'), "padding <= max_padding");
+#endif
+
+ // Octal %o
+ EXPECT_EQ(1, SafeSPrintf(buf, "%o", 1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%2o", 1));
+ EXPECT_EQ(" 1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%02o", 1));
+ EXPECT_EQ("01", std::string(buf));
+ EXPECT_EQ(12, SafeSPrintf(buf, "%12o", -1));
+ EXPECT_EQ(" 37777777777", std::string(buf));
+ EXPECT_EQ(12, SafeSPrintf(buf, "%012o", -1));
+ EXPECT_EQ("037777777777", std::string(buf));
+ EXPECT_EQ(23, SafeSPrintf(buf, "%23o", -1LL));
+ EXPECT_EQ(" 1777777777777777777777", std::string(buf));
+ EXPECT_EQ(23, SafeSPrintf(buf, "%023o", -1LL));
+ EXPECT_EQ("01777777777777777777777", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%2o", 0111));
+ EXPECT_EQ("111", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2o", 1));
+ EXPECT_EQ("%-2o", std::string(buf));
+ SafeSPrintf(fmt, "%%%do", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%0%do", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ("000", std::string(buf));
+ SafeSPrintf(fmt, "%%%do",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+ EXPECT_EQ("%o", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+ // Decimals %d
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", 1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%2d", 1));
+ EXPECT_EQ(" 1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%02d", 1));
+ EXPECT_EQ("01", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%3d", -1));
+ EXPECT_EQ(" -1", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%03d", -1));
+ EXPECT_EQ("-01", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%2d", 111));
+ EXPECT_EQ("111", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%2d", -111));
+ EXPECT_EQ("-111", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2d", 1));
+ EXPECT_EQ("%-2d", std::string(buf));
+ SafeSPrintf(fmt, "%%%dd", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%0%dd", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ("000", std::string(buf));
+ SafeSPrintf(fmt, "%%%dd",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+ EXPECT_EQ("%d", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+ // Hex %X
+ EXPECT_EQ(1, SafeSPrintf(buf, "%X", 1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%2X", 1));
+ EXPECT_EQ(" 1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%02X", 1));
+ EXPECT_EQ("01", std::string(buf));
+ EXPECT_EQ(9, SafeSPrintf(buf, "%9X", -1));
+ EXPECT_EQ(" FFFFFFFF", std::string(buf));
+ EXPECT_EQ(9, SafeSPrintf(buf, "%09X", -1));
+ EXPECT_EQ("0FFFFFFFF", std::string(buf));
+ EXPECT_EQ(17, SafeSPrintf(buf, "%17X", -1LL));
+ EXPECT_EQ(" FFFFFFFFFFFFFFFF", std::string(buf));
+ EXPECT_EQ(17, SafeSPrintf(buf, "%017X", -1LL));
+ EXPECT_EQ("0FFFFFFFFFFFFFFFF", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%2X", 0x111));
+ EXPECT_EQ("111", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2X", 1));
+ EXPECT_EQ("%-2X", std::string(buf));
+ SafeSPrintf(fmt, "%%%dX", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%0%dX", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ("000", std::string(buf));
+ SafeSPrintf(fmt, "%%%dX",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+ EXPECT_EQ("%X", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+ // Pointer %p
+ EXPECT_EQ(3, SafeSPrintf(buf, "%p", (void*)1));
+ EXPECT_EQ("0x1", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%4p", (void*)1));
+ EXPECT_EQ(" 0x1", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%04p", (void*)1));
+ EXPECT_EQ("0x01", std::string(buf));
+ EXPECT_EQ(5, SafeSPrintf(buf, "%4p", (void*)0x111));
+ EXPECT_EQ("0x111", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2p", (void*)1));
+ EXPECT_EQ("%-2p", std::string(buf));
+ SafeSPrintf(fmt, "%%%dp", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, (void*)1));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%0%dp", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, (void*)1));
+ EXPECT_EQ("0x0", std::string(buf));
+ SafeSPrintf(fmt, "%%%dp",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+ EXPECT_EQ("%p", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+ // String
+ EXPECT_EQ(1, SafeSPrintf(buf, "%s", "A"));
+ EXPECT_EQ("A", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%2s", "A"));
+ EXPECT_EQ(" A", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%02s", "A"));
+ EXPECT_EQ(" A", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%2s", "AAA"));
+ EXPECT_EQ("AAA", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2s", "A"));
+ EXPECT_EQ("%-2s", std::string(buf));
+ SafeSPrintf(fmt, "%%%ds", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, "A"));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%0%ds", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, "A"));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%%ds",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, "A"));
+ EXPECT_EQ("%s", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, "A"), "padding <= max_padding");
+#endif
+}
+
+TEST(SafeSPrintfTest, EmbeddedNul) {
+ char buf[] = { 'X', 'X', 'X', 'X' };
+ EXPECT_EQ(2, SafeSPrintf(buf, "%3c", 0));
+ EXPECT_EQ(' ', buf[0]);
+ EXPECT_EQ(' ', buf[1]);
+ EXPECT_EQ(0, buf[2]);
+ EXPECT_EQ('X', buf[3]);
+
+ // Check handling of a NUL format character. N.B. this takes two different
+ // code paths depending on whether we are actually passing arguments. If
+ // we don't have any arguments, we are running in the fast-path code, that
+ // looks (almost) like a strncpy().
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+ EXPECT_EQ("%%", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+ EXPECT_EQ("%%", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+ EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+TEST(SafeSPrintfTest, EmitNULL) {
+ char buf[40];
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion-null"
+#endif
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", NULL));
+ EXPECT_EQ("0", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%p", NULL));
+ EXPECT_EQ("0x0", std::string(buf));
+ EXPECT_EQ(6, SafeSPrintf(buf, "%s", NULL));
+ EXPECT_EQ("<NULL>", std::string(buf));
+#if defined(__GCC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+TEST(SafeSPrintfTest, PointerSize) {
+ // The internal data representation is a 64bit value, independent of the
+ // native word size. We want to perform sign-extension for signed integers,
+ // but we want to avoid doing so for pointer types. This could be a
+ // problem on systems, where pointers are only 32bit. This tests verifies
+ // that there is no such problem.
+ char *str = reinterpret_cast<char *>(0x80000000u);
+ void *ptr = str;
+ char buf[40];
+ EXPECT_EQ(10, SafeSPrintf(buf, "%p", str));
+ EXPECT_EQ("0x80000000", std::string(buf));
+ EXPECT_EQ(10, SafeSPrintf(buf, "%p", ptr));
+ EXPECT_EQ("0x80000000", std::string(buf));
+}
+
+} // namespace strings
+} // namespace base
diff --git a/security/sandbox/chromium/base/strings/string16.cc b/security/sandbox/chromium/base/strings/string16.cc
new file mode 100644
index 0000000000..84962e6710
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string16.cc
@@ -0,0 +1,87 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string16.h"
+
+#if defined(WCHAR_T_IS_UTF16) && !defined(_AIX)
+
+#error This file should not be used on 2-byte wchar_t systems
+// If this winds up being needed on 2-byte wchar_t systems, either the
+// definitions below can be used, or the host system's wide character
+// functions like wmemcmp can be wrapped.
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+#include <ostream>
+
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+int c16memcmp(const char16* s1, const char16* s2, size_t n) {
+ // We cannot call memcmp because that changes the semantics.
+ while (n-- > 0) {
+ if (*s1 != *s2) {
+ // We cannot use (*s1 - *s2) because char16 is unsigned.
+ return ((*s1 < *s2) ? -1 : 1);
+ }
+ ++s1;
+ ++s2;
+ }
+ return 0;
+}
+
+size_t c16len(const char16* s) {
+ const char16 *s_orig = s;
+ while (*s) {
+ ++s;
+ }
+ return s - s_orig;
+}
+
+const char16* c16memchr(const char16* s, char16 c, size_t n) {
+ while (n-- > 0) {
+ if (*s == c) {
+ return s;
+ }
+ ++s;
+ }
+ return nullptr;
+}
+
+char16* c16memmove(char16* s1, const char16* s2, size_t n) {
+ return static_cast<char16*>(memmove(s1, s2, n * sizeof(char16)));
+}
+
+char16* c16memcpy(char16* s1, const char16* s2, size_t n) {
+ return static_cast<char16*>(memcpy(s1, s2, n * sizeof(char16)));
+}
+
+char16* c16memset(char16* s, char16 c, size_t n) {
+ char16 *s_orig = s;
+ while (n-- > 0) {
+ *s = c;
+ ++s;
+ }
+ return s_orig;
+}
+
+namespace string16_internals {
+
+std::ostream& operator<<(std::ostream& out, const string16& str) {
+ return out << base::StringPiece16(str);
+}
+
+void PrintTo(const string16& str, std::ostream* out) {
+ *out << str;
+}
+
+} // namespace string16_internals
+
+} // namespace base
+
+template class std::
+ basic_string<base::char16, base::string16_internals::string16_char_traits>;
+
+#endif // WCHAR_T_IS_UTF32
diff --git a/security/sandbox/chromium/base/strings/string16.h b/security/sandbox/chromium/base/strings/string16.h
new file mode 100644
index 0000000000..3cb6c7c495
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string16.h
@@ -0,0 +1,229 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING16_H_
+#define BASE_STRINGS_STRING16_H_
+
+// WHAT:
+// A version of std::basic_string that provides 2-byte characters even when
+// wchar_t is not implemented as a 2-byte type. You can access this class as
+// string16. We also define char16, which string16 is based upon.
+//
+// WHY:
+// On Windows, wchar_t is 2 bytes, and it can conveniently handle UTF-16/UCS-2
+// data. Plenty of existing code operates on strings encoded as UTF-16.
+//
+// On many other platforms, sizeof(wchar_t) is 4 bytes by default. We can make
+// it 2 bytes by using the GCC flag -fshort-wchar. But then std::wstring fails
+// at run time, because it calls some functions (like wcslen) that come from
+// the system's native C library -- which was built with a 4-byte wchar_t!
+// It's wasteful to use 4-byte wchar_t strings to carry UTF-16 data, and it's
+// entirely improper on those systems where the encoding of wchar_t is defined
+// as UTF-32.
+//
+// Here, we define string16, which is similar to std::wstring but replaces all
+// libc functions with custom, 2-byte-char compatible routines. It is capable
+// of carrying UTF-16-encoded data.
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <functional>
+#include <string>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+#if defined(WCHAR_T_IS_UTF16)
+
+// Define a macro for wrapping construction of char16 arrays and string16s from
+// a literal string. This indirection allows for an easier migration of
+// base::char16 to char16_t on platforms where WCHAR_T_IS_UTF16, as only a one
+// character change to the macro will be necessary.
+// This macro does not exist when WCHAR_T_IS_UTF32, as it is currently not
+// possible to create a char array from a literal in this case.
+// TODO(https://crbug.com/911896): Remove this macro once base::char16 is
+// char16_t on all platforms.
+#define STRING16_LITERAL(x) L##x
+
+namespace base {
+
+typedef wchar_t char16;
+typedef std::wstring string16;
+
+} // namespace base
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+#include <wchar.h> // for mbstate_t
+
+namespace base {
+
+typedef uint16_t char16;
+
+// char16 versions of the functions required by string16_char_traits; these
+// are based on the wide character functions of similar names ("w" or "wcs"
+// instead of "c16").
+BASE_EXPORT int c16memcmp(const char16* s1, const char16* s2, size_t n);
+BASE_EXPORT size_t c16len(const char16* s);
+BASE_EXPORT const char16* c16memchr(const char16* s, char16 c, size_t n);
+BASE_EXPORT char16* c16memmove(char16* s1, const char16* s2, size_t n);
+BASE_EXPORT char16* c16memcpy(char16* s1, const char16* s2, size_t n);
+BASE_EXPORT char16* c16memset(char16* s, char16 c, size_t n);
+
+// This namespace contains the implementation of base::string16 along with
+// things that need to be found via argument-dependent lookup from a
+// base::string16.
+namespace string16_internals {
+
+// char_traits implementation for char16, backed by the c16* helper functions
+// declared above (defined in string16.cc). This is what makes
+// std::basic_string<char16, ...> usable as string16.
+struct string16_char_traits {
+ typedef char16 char_type;
+ typedef int int_type;
+
+ // int_type needs to be able to hold each possible value of char_type, and in
+ // addition, the distinct value of eof().
+ static_assert(sizeof(int_type) > sizeof(char_type),
+ "int must be larger than 16 bits wide");
+
+ typedef std::streamoff off_type;
+ typedef mbstate_t state_type;
+ typedef std::fpos<state_type> pos_type;
+
+ static void assign(char_type& c1, const char_type& c2) {
+ c1 = c2;
+ }
+
+ static bool eq(const char_type& c1, const char_type& c2) {
+ return c1 == c2;
+ }
+ static bool lt(const char_type& c1, const char_type& c2) {
+ return c1 < c2;
+ }
+
+ // The next six members delegate to the c16* replacements for the libc wide
+ // character functions.
+ static int compare(const char_type* s1, const char_type* s2, size_t n) {
+ return c16memcmp(s1, s2, n);
+ }
+
+ static size_t length(const char_type* s) {
+ return c16len(s);
+ }
+
+ static const char_type* find(const char_type* s, size_t n,
+ const char_type& a) {
+ return c16memchr(s, a, n);
+ }
+
+ static char_type* move(char_type* s1, const char_type* s2, size_t n) {
+ return c16memmove(s1, s2, n);
+ }
+
+ static char_type* copy(char_type* s1, const char_type* s2, size_t n) {
+ return c16memcpy(s1, s2, n);
+ }
+
+ static char_type* assign(char_type* s, size_t n, char_type a) {
+ return c16memset(s, a, n);
+ }
+
+ static int_type not_eof(const int_type& c) {
+ return eq_int_type(c, eof()) ? 0 : c;
+ }
+
+ static char_type to_char_type(const int_type& c) {
+ return char_type(c);
+ }
+
+ static int_type to_int_type(const char_type& c) {
+ return int_type(c);
+ }
+
+ static bool eq_int_type(const int_type& c1, const int_type& c2) {
+ return c1 == c2;
+ }
+
+ // stdio's EOF is negative, so it can never collide with a char16 value
+ // widened by to_int_type (which zero-extends to a non-negative int).
+ static int_type eof() {
+ return static_cast<int_type>(EOF);
+ }
+};
+
+} // namespace string16_internals
+
+typedef std::basic_string<char16,
+ base::string16_internals::string16_char_traits>
+ string16;
+
+namespace string16_internals {
+
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& out,
+ const string16& str);
+
+// This is required by googletest to print a readable output on test failures.
+BASE_EXPORT extern void PrintTo(const string16& str, std::ostream* out);
+
+} // namespace string16_internals
+
+} // namespace base
+
+// The string class will be explicitly instantiated only once, in string16.cc.
+//
+// std::basic_string<> in GNU libstdc++ contains a static data member,
+// _S_empty_rep_storage, to represent empty strings. When an operation such
+// as assignment or destruction is performed on a string, causing its existing
+// data member to be invalidated, it must not be freed if this static data
+// member is being used. Otherwise, it counts as an attempt to free static
+// (and not allocated) data, which is a memory error.
+//
+// Generally, due to C++ template magic, _S_empty_rep_storage will be marked
+// as a coalesced symbol, meaning that the linker will combine multiple
+// instances into a single one when generating output.
+//
+// If a string class is used by multiple shared libraries, a problem occurs.
+// Each library will get its own copy of _S_empty_rep_storage. When strings
+// are passed across a library boundary for alteration or destruction, memory
+// errors will result. GNU libstdc++ contains a configuration option,
+// --enable-fully-dynamic-string (_GLIBCXX_FULLY_DYNAMIC_STRING), which
+// disables the static data member optimization, but it's a good optimization
+// and non-STL code is generally at the mercy of the system's STL
+// configuration. Fully-dynamic strings are not the default for GNU
+// libstdc++ itself or for the libstdc++ installations on the systems we care
+// about, such as Mac OS X and relevant flavors of Linux.
+//
+// See also http://gcc.gnu.org/bugzilla/show_bug.cgi?id=24196 .
+//
+// To avoid problems, string classes need to be explicitly instantiated only
+// once, in exactly one library. All other string users see it via an "extern"
+// declaration. This is precisely how GNU libstdc++ handles
+// std::basic_string<char> (string) and std::basic_string<wchar_t> (wstring).
+//
+// This also works around a Mac OS X linker bug in ld64-85.2.1 (Xcode 3.1.2),
+// in which the linker does not fully coalesce symbols when dead code
+// stripping is enabled. This bug causes the memory errors described above
+// to occur even when a std::basic_string<> does not cross shared library
+// boundaries, such as in statically-linked executables.
+//
+// TODO(mark): File this bug with Apple and update this note with a bug number.
+
+extern template class BASE_EXPORT
+ std::basic_string<base::char16,
+ base::string16_internals::string16_char_traits>;
+
+// Specialize std::hash for base::string16. Although the style guide forbids
+// this in general, it is necessary for consistency with WCHAR_T_IS_UTF16
+// platforms, where base::string16 is a type alias for std::wstring.
+namespace std {
+template <>
+struct hash<base::string16> {
+ std::size_t operator()(const base::string16& s) const {
+ // Simple polynomial rolling hash with multiplier 131 over the 16-bit
+ // code units.
+ std::size_t result = 0;
+ for (base::char16 c : s)
+ result = (result * 131) + c;
+ return result;
+ }
+};
+} // namespace std
+
+#endif // WCHAR_T_IS_UTF32
+
+#endif // BASE_STRINGS_STRING16_H_
diff --git a/security/sandbox/chromium/base/strings/string_number_conversions.cc b/security/sandbox/chromium/base/strings/string_number_conversions.cc
new file mode 100644
index 0000000000..3b5eda1fe3
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_number_conversions.cc
@@ -0,0 +1,545 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_number_conversions.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <wctype.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/logging.h"
+#include "base/no_destructor.h"
+#include "base/numerics/safe_math.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "double-conversion/double-conversion.h"
+
+namespace base {
+
+namespace {
+
+// Converts an integer |value| of type INT into its base-10 representation as
+// a string of type STR (std::string or string16). Handles the full range of
+// INT, including its minimum value for signed types, by working on the
+// unsigned absolute value.
+template <typename STR, typename INT>
+struct IntToStringT {
+ static STR IntToString(INT value) {
+ // log10(2) ~= 0.3 bytes needed per bit or per byte log10(2**8) ~= 2.4.
+ // So round up to allocate 3 output characters per byte, plus 1 for '-'.
+ const size_t kOutputBufSize =
+ 3 * sizeof(INT) + std::numeric_limits<INT>::is_signed;
+
+ // Create the string in a temporary buffer, write it back to front, and
+ // then return the substr of what we ended up using.
+ using CHR = typename STR::value_type;
+ CHR outbuf[kOutputBufSize];
+
+ // The ValueOrDie call below can never fail, because UnsignedAbs is valid
+ // for all valid inputs.
+ typename std::make_unsigned<INT>::type res =
+ CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
+
+ // Emit digits least-significant first, moving backwards from the end of
+ // the buffer.
+ CHR* end = outbuf + kOutputBufSize;
+ CHR* i = end;
+ do {
+ --i;
+ DCHECK(i != outbuf);
+ *i = static_cast<CHR>((res % 10) + '0');
+ res /= 10;
+ } while (res != 0);
+ if (IsValueNegative(value)) {
+ --i;
+ DCHECK(i != outbuf);
+ *i = static_cast<CHR>('-');
+ }
+ return STR(i, end);
+ }
+};
+
+// Utility to convert a character to a digit in a given base
+// Primary template is intentionally empty; only the two specializations
+// below (selected on BASE <= 10) are usable.
+template<typename CHAR, int BASE, bool BASE_LTE_10> class BaseCharToDigit {
+};
+
+// Faster specialization for bases <= 10
+template<typename CHAR, int BASE> class BaseCharToDigit<CHAR, BASE, true> {
+ public:
+ // Returns true and sets |*digit| iff |c| is a valid base-BASE digit.
+ static bool Convert(CHAR c, uint8_t* digit) {
+ if (c >= '0' && c < '0' + BASE) {
+ *digit = static_cast<uint8_t>(c - '0');
+ return true;
+ }
+ return false;
+ }
+};
+
+// Specialization for bases where 10 < base <= 36
+template<typename CHAR, int BASE> class BaseCharToDigit<CHAR, BASE, false> {
+ public:
+ // Returns true and sets |*digit| iff |c| is a valid base-BASE digit;
+ // letters are accepted in both cases (e.g. 'a'/'A' == 10).
+ static bool Convert(CHAR c, uint8_t* digit) {
+ if (c >= '0' && c <= '9') {
+ *digit = c - '0';
+ } else if (c >= 'a' && c < 'a' + BASE - 10) {
+ *digit = c - 'a' + 10;
+ } else if (c >= 'A' && c < 'A' + BASE - 10) {
+ *digit = c - 'A' + 10;
+ } else {
+ return false;
+ }
+ return true;
+ }
+};
+
+// Convenience dispatcher over the two specializations above.
+template <int BASE, typename CHAR>
+bool CharToDigit(CHAR c, uint8_t* digit) {
+ return BaseCharToDigit<CHAR, BASE, BASE <= 10>::Convert(c, digit);
+}
+
+// There is an IsUnicodeWhitespace for wchars defined in string_util.h, but it
+// is locale independent, whereas the functions we are replacing were
+// locale-dependent. TBD what is desired, but for the moment let's not
+// introduce a change in behaviour.
+// Per-character-type whitespace test; only the char and char16
+// specializations below are usable.
+template<typename CHAR> class WhitespaceHelper {
+};
+
+template<> class WhitespaceHelper<char> {
+ public:
+ static bool Invoke(char c) {
+ // isspace() requires an unsigned char (or EOF); the cast avoids
+ // undefined behavior for negative char values.
+ return 0 != isspace(static_cast<unsigned char>(c));
+ }
+};
+
+template<> class WhitespaceHelper<char16> {
+ public:
+ static bool Invoke(char16 c) {
+ return 0 != iswspace(c);
+ }
+};
+
+// Dispatches to the appropriate specialization for CHAR.
+template<typename CHAR> bool LocalIsWhitespace(CHAR c) {
+ return WhitespaceHelper<CHAR>::Invoke(c);
+}
+
+// IteratorRangeToNumberTraits should provide:
+// - a typedef for iterator_type, the iterator type used as input.
+// - a typedef for value_type, the target numeric type.
+// - static functions min, max (returning the minimum and maximum permitted
+// values)
+// - constant kBase, the base in which to interpret the input
+template<typename IteratorRangeToNumberTraits>
+class IteratorRangeToNumber {
+ public:
+ typedef IteratorRangeToNumberTraits traits;
+ typedef typename traits::iterator_type const_iterator;
+ typedef typename traits::value_type value_type;
+
+ // Generalized iterator-range-to-number conversion.
+ //
+ // Returns true only for a "perfect" conversion: no leading whitespace, an
+ // optional sign, at least one digit, no overflow/underflow, and no trailing
+ // characters. |*output| is always written best-effort, even on failure
+ // (see the header's StringToInt() contract).
+ static bool Invoke(const_iterator begin,
+ const_iterator end,
+ value_type* output) {
+ bool valid = true;
+
+ // Leading whitespace is consumed but makes the conversion imperfect.
+ while (begin != end && LocalIsWhitespace(*begin)) {
+ valid = false;
+ ++begin;
+ }
+
+ if (begin != end && *begin == '-') {
+ // A minus sign on an unsigned target can never be valid.
+ if (!std::numeric_limits<value_type>::is_signed) {
+ *output = 0;
+ valid = false;
+ } else if (!Negative::Invoke(begin + 1, end, output)) {
+ valid = false;
+ }
+ } else {
+ if (begin != end && *begin == '+') {
+ ++begin;
+ }
+ if (!Positive::Invoke(begin, end, output)) {
+ valid = false;
+ }
+ }
+
+ return valid;
+ }
+
+ private:
+ // Sign provides:
+ // - a static function, CheckBounds, that determines whether the next digit
+ // causes an overflow/underflow
+ // - a static function, Increment, that appends the next digit appropriately
+ // according to the sign of the number being parsed.
+ template<typename Sign>
+ class Base {
+ public:
+ static bool Invoke(const_iterator begin, const_iterator end,
+ typename traits::value_type* output) {
+ *output = 0;
+
+ // An empty digit sequence is not a number.
+ if (begin == end) {
+ return false;
+ }
+
+ // Note: no performance difference was found when using template
+ // specialization to remove this check in bases other than 16
+ if (traits::kBase == 16 && end - begin > 2 && *begin == '0' &&
+ (*(begin + 1) == 'x' || *(begin + 1) == 'X')) {
+ begin += 2;
+ }
+
+ for (const_iterator current = begin; current != end; ++current) {
+ uint8_t new_digit = 0;
+
+ // Any non-digit character aborts the parse.
+ if (!CharToDigit<traits::kBase>(*current, &new_digit)) {
+ return false;
+ }
+
+ if (current != begin) {
+ // Check for overflow/underflow before shifting in the new digit;
+ // on failure *output has already been saturated by CheckBounds.
+ if (!Sign::CheckBounds(output, new_digit)) {
+ return false;
+ }
+ *output *= traits::kBase;
+ }
+
+ Sign::Increment(new_digit, output);
+ }
+ return true;
+ }
+ };
+
+ class Positive : public Base<Positive> {
+ public:
+ static bool CheckBounds(value_type* output, uint8_t new_digit) {
+ if (*output > static_cast<value_type>(traits::max() / traits::kBase) ||
+ (*output == static_cast<value_type>(traits::max() / traits::kBase) &&
+ new_digit > traits::max() % traits::kBase)) {
+ *output = traits::max();
+ return false;
+ }
+ return true;
+ }
+ static void Increment(uint8_t increment, value_type* output) {
+ *output += increment;
+ }
+ };
+
+ // Builds the value as a negative number directly, so that min() (whose
+ // magnitude exceeds max() on two's-complement targets) can be represented
+ // without overflowing.
+ class Negative : public Base<Negative> {
+ public:
+ static bool CheckBounds(value_type* output, uint8_t new_digit) {
+ if (*output < traits::min() / traits::kBase ||
+ (*output == traits::min() / traits::kBase &&
+ new_digit > 0 - traits::min() % traits::kBase)) {
+ *output = traits::min();
+ return false;
+ }
+ return true;
+ }
+ static void Increment(uint8_t increment, value_type* output) {
+ *output -= increment;
+ }
+ };
+};
+
+// Shared base for the per-type traits below: the permitted value range comes
+// from std::numeric_limits and the base is a compile-time constant.
+template<typename ITERATOR, typename VALUE, int BASE>
+class BaseIteratorRangeToNumberTraits {
+ public:
+ typedef ITERATOR iterator_type;
+ typedef VALUE value_type;
+ static value_type min() {
+ return std::numeric_limits<value_type>::min();
+ }
+ static value_type max() {
+ return std::numeric_limits<value_type>::max();
+ }
+ static const int kBase = BASE;
+};
+
+// Hexadecimal (base-16) trait bundles for the four integer widths, plus
+// typedefs binding them to StringPiece iterators for the HexStringTo*()
+// entry points below.
+template<typename ITERATOR>
+class BaseHexIteratorRangeToIntTraits
+ : public BaseIteratorRangeToNumberTraits<ITERATOR, int, 16> {
+};
+
+template <typename ITERATOR>
+class BaseHexIteratorRangeToUIntTraits
+ : public BaseIteratorRangeToNumberTraits<ITERATOR, uint32_t, 16> {};
+
+template <typename ITERATOR>
+class BaseHexIteratorRangeToInt64Traits
+ : public BaseIteratorRangeToNumberTraits<ITERATOR, int64_t, 16> {};
+
+template <typename ITERATOR>
+class BaseHexIteratorRangeToUInt64Traits
+ : public BaseIteratorRangeToNumberTraits<ITERATOR, uint64_t, 16> {};
+
+typedef BaseHexIteratorRangeToIntTraits<StringPiece::const_iterator>
+ HexIteratorRangeToIntTraits;
+
+typedef BaseHexIteratorRangeToUIntTraits<StringPiece::const_iterator>
+ HexIteratorRangeToUIntTraits;
+
+typedef BaseHexIteratorRangeToInt64Traits<StringPiece::const_iterator>
+ HexIteratorRangeToInt64Traits;
+
+typedef BaseHexIteratorRangeToUInt64Traits<StringPiece::const_iterator>
+ HexIteratorRangeToUInt64Traits;
+
+// Decimal (base-10) parse helpers shared by the StringTo*() entry points
+// below, for 8-bit and 16-bit string pieces respectively.
+template <typename VALUE, int BASE>
+class StringPieceToNumberTraits
+ : public BaseIteratorRangeToNumberTraits<StringPiece::const_iterator,
+ VALUE,
+ BASE> {
+};
+
+template <typename VALUE>
+bool StringToIntImpl(StringPiece input, VALUE* output) {
+ return IteratorRangeToNumber<StringPieceToNumberTraits<VALUE, 10> >::Invoke(
+ input.begin(), input.end(), output);
+}
+
+template <typename VALUE, int BASE>
+class StringPiece16ToNumberTraits
+ : public BaseIteratorRangeToNumberTraits<StringPiece16::const_iterator,
+ VALUE,
+ BASE> {
+};
+
+template <typename VALUE>
+bool String16ToIntImpl(StringPiece16 input, VALUE* output) {
+ return IteratorRangeToNumber<StringPiece16ToNumberTraits<VALUE, 10> >::Invoke(
+ input.begin(), input.end(), output);
+}
+
+} // namespace
+
+// Public integer-to-string entry points: each forwards to IntToStringT for
+// the matching integer type and destination string type.
+std::string NumberToString(int value) {
+ return IntToStringT<std::string, int>::IntToString(value);
+}
+
+string16 NumberToString16(int value) {
+ return IntToStringT<string16, int>::IntToString(value);
+}
+
+std::string NumberToString(unsigned value) {
+ return IntToStringT<std::string, unsigned>::IntToString(value);
+}
+
+string16 NumberToString16(unsigned value) {
+ return IntToStringT<string16, unsigned>::IntToString(value);
+}
+
+std::string NumberToString(long value) {
+ return IntToStringT<std::string, long>::IntToString(value);
+}
+
+string16 NumberToString16(long value) {
+ return IntToStringT<string16, long>::IntToString(value);
+}
+
+std::string NumberToString(unsigned long value) {
+ return IntToStringT<std::string, unsigned long>::IntToString(value);
+}
+
+string16 NumberToString16(unsigned long value) {
+ return IntToStringT<string16, unsigned long>::IntToString(value);
+}
+
+std::string NumberToString(long long value) {
+ return IntToStringT<std::string, long long>::IntToString(value);
+}
+
+string16 NumberToString16(long long value) {
+ return IntToStringT<string16, long long>::IntToString(value);
+}
+
+std::string NumberToString(unsigned long long value) {
+ return IntToStringT<std::string, unsigned long long>::IntToString(value);
+}
+
+string16 NumberToString16(unsigned long long value) {
+ return IntToStringT<string16, unsigned long long>::IntToString(value);
+}
+
+// Lazily creates (and, via NoDestructor, never destroys) the shared
+// double-conversion formatter used by NumberToString(double) below.
+static const double_conversion::DoubleToStringConverter*
+GetDoubleToStringConverter() {
+ static NoDestructor<double_conversion::DoubleToStringConverter> converter(
+ double_conversion::DoubleToStringConverter::EMIT_POSITIVE_EXPONENT_SIGN,
+ nullptr, nullptr, 'e', -6, 12, 0, 0);
+ return converter.get();
+}
+
+// Formats |value| using double-conversion's ToShortest(), i.e. the shortest
+// decimal representation that round-trips.
+std::string NumberToString(double value) {
+ char buffer[32];
+ double_conversion::StringBuilder builder(buffer, sizeof(buffer));
+ GetDoubleToStringConverter()->ToShortest(value, &builder);
+ return std::string(buffer, builder.position());
+}
+
+base::string16 NumberToString16(double value) {
+ char buffer[32];
+ double_conversion::StringBuilder builder(buffer, sizeof(buffer));
+ GetDoubleToStringConverter()->ToShortest(value, &builder);
+
+ // The number will be ASCII. This creates the string using the "input
+ // iterator" variant which promotes from 8-bit to 16-bit via "=".
+ return base::string16(&buffer[0], &buffer[builder.position()]);
+}
+
+// Public decimal parse entry points. See the header for the exact
+// success/failure contract; all of these write a best-effort value to
+// |*output| even when they return false.
+bool StringToInt(StringPiece input, int* output) {
+ return StringToIntImpl(input, output);
+}
+
+bool StringToInt(StringPiece16 input, int* output) {
+ return String16ToIntImpl(input, output);
+}
+
+bool StringToUint(StringPiece input, unsigned* output) {
+ return StringToIntImpl(input, output);
+}
+
+bool StringToUint(StringPiece16 input, unsigned* output) {
+ return String16ToIntImpl(input, output);
+}
+
+bool StringToInt64(StringPiece input, int64_t* output) {
+ return StringToIntImpl(input, output);
+}
+
+bool StringToInt64(StringPiece16 input, int64_t* output) {
+ return String16ToIntImpl(input, output);
+}
+
+bool StringToUint64(StringPiece input, uint64_t* output) {
+ return StringToIntImpl(input, output);
+}
+
+bool StringToUint64(StringPiece16 input, uint64_t* output) {
+ return String16ToIntImpl(input, output);
+}
+
+bool StringToSizeT(StringPiece input, size_t* output) {
+ return StringToIntImpl(input, output);
+}
+
+bool StringToSizeT(StringPiece16 input, size_t* output) {
+ return String16ToIntImpl(input, output);
+}
+
+// Shared implementation for the two StringToDouble() overloads. |data| is
+// |input|'s buffer, passed separately so the 16-bit overload can reinterpret
+// it to the character type the converter expects.
+template <typename STRING, typename CHAR>
+bool StringToDoubleImpl(STRING input, const CHAR* data, double* output) {
+ static NoDestructor<double_conversion::StringToDoubleConverter> converter(
+ double_conversion::StringToDoubleConverter::ALLOW_LEADING_SPACES |
+ double_conversion::StringToDoubleConverter::ALLOW_TRAILING_JUNK,
+ 0.0, 0, nullptr, nullptr);
+
+ int processed_characters_count;
+ *output = converter->StringToDouble(data, input.size(),
+ &processed_characters_count);
+
+ // Cases to return false:
+ // - If the input string is empty, there was nothing to parse.
+ // - If the value saturated to HUGE_VAL.
+ // - If the entire string was not processed, there are either characters
+ // remaining in the string after a parsed number, or the string does not
+ // begin with a parseable number.
+ // - If the first character is a space, there was leading whitespace
+ return !input.empty() && *output != HUGE_VAL && *output != -HUGE_VAL &&
+ static_cast<size_t>(processed_characters_count) == input.size() &&
+ !IsUnicodeWhitespace(input[0]);
+}
+
+bool StringToDouble(StringPiece input, double* output) {
+ return StringToDoubleImpl(input, input.data(), output);
+}
+
+bool StringToDouble(StringPiece16 input, double* output) {
+ return StringToDoubleImpl(
+ input, reinterpret_cast<const uint16_t*>(input.data()), output);
+}
+
+// Converts each input byte to two uppercase hex characters, high nibble
+// first.
+std::string HexEncode(const void* bytes, size_t size) {
+ static const char kHexChars[] = "0123456789ABCDEF";
+
+ // Each input byte creates two output hex characters.
+ std::string ret(size * 2, '\0');
+
+ for (size_t i = 0; i < size; ++i) {
+ char b = reinterpret_cast<const char*>(bytes)[i];
+ ret[(i * 2)] = kHexChars[(b >> 4) & 0xf];
+ ret[(i * 2) + 1] = kHexChars[b & 0xf];
+ }
+ return ret;
+}
+
+// Span convenience overload; forwards to the pointer/size version above.
+std::string HexEncode(base::span<const uint8_t> bytes) {
+ return HexEncode(bytes.data(), bytes.size());
+}
+
+// Hexadecimal parse entry points for the four integer widths. An optional
+// "0x"/"0X" prefix is accepted (stripped in IteratorRangeToNumber's base-16
+// path).
+bool HexStringToInt(StringPiece input, int* output) {
+ return IteratorRangeToNumber<HexIteratorRangeToIntTraits>::Invoke(
+ input.begin(), input.end(), output);
+}
+
+bool HexStringToUInt(StringPiece input, uint32_t* output) {
+ return IteratorRangeToNumber<HexIteratorRangeToUIntTraits>::Invoke(
+ input.begin(), input.end(), output);
+}
+
+bool HexStringToInt64(StringPiece input, int64_t* output) {
+ return IteratorRangeToNumber<HexIteratorRangeToInt64Traits>::Invoke(
+ input.begin(), input.end(), output);
+}
+
+bool HexStringToUInt64(StringPiece input, uint64_t* output) {
+ return IteratorRangeToNumber<HexIteratorRangeToUInt64Traits>::Invoke(
+ input.begin(), input.end(), output);
+}
+
+// Decodes hex |input| into |output| (a vector<uint8_t> or std::string),
+// appending one byte per digit pair. Fails on empty or odd-length input, or
+// on any non-hex character; bytes decoded before the failure remain in
+// |*output|.
+template <typename Container>
+static bool HexStringToByteContainer(StringPiece input, Container* output) {
+ DCHECK_EQ(output->size(), 0u);
+ size_t count = input.size();
+ if (count == 0 || (count % 2) != 0)
+ return false;
+ for (uintptr_t i = 0; i < count / 2; ++i) {
+ uint8_t msb = 0; // most significant 4 bits
+ uint8_t lsb = 0; // least significant 4 bits
+ if (!CharToDigit<16>(input[i * 2], &msb) ||
+ !CharToDigit<16>(input[i * 2 + 1], &lsb)) {
+ return false;
+ }
+ output->push_back((msb << 4) | lsb);
+ }
+ return true;
+}
+
+bool HexStringToBytes(StringPiece input, std::vector<uint8_t>* output) {
+ return HexStringToByteContainer(input, output);
+}
+
+bool HexStringToString(StringPiece input, std::string* output) {
+ return HexStringToByteContainer(input, output);
+}
+
+// Span variant of hex decoding: |output| must be presized to exactly
+// input.size() / 2 bytes (see the header comment for the full contract).
+bool HexStringToSpan(StringPiece input, base::span<uint8_t> output) {
+ size_t count = input.size();
+ if (count == 0 || (count % 2) != 0)
+ return false;
+
+ if (count / 2 != output.size())
+ return false;
+
+ for (uintptr_t i = 0; i < count / 2; ++i) {
+ uint8_t msb = 0; // most significant 4 bits
+ uint8_t lsb = 0; // least significant 4 bits
+ if (!CharToDigit<16>(input[i * 2], &msb) ||
+ !CharToDigit<16>(input[i * 2 + 1], &lsb)) {
+ return false;
+ }
+ output[i] = (msb << 4) | lsb;
+ }
+ return true;
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/strings/string_number_conversions.h b/security/sandbox/chromium/base/strings/string_number_conversions.h
new file mode 100644
index 0000000000..87df24e21c
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_number_conversions.h
@@ -0,0 +1,157 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_NUMBER_CONVERSIONS_H_
+#define BASE_STRINGS_STRING_NUMBER_CONVERSIONS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/containers/span.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+// ----------------------------------------------------------------------------
+// IMPORTANT MESSAGE FROM YOUR SPONSOR
+//
+// This file contains no "wstring" variants. New code should use string16. If
+// you need to make old code work, use the UTF8 version and convert. Please do
+// not add wstring variants.
+//
+// Please do not add "convenience" functions for converting strings to integers
+// that return the value and ignore success/failure. That encourages people to
+// write code that doesn't properly handle the error conditions.
+//
+// DO NOT use these functions in any UI unless it's NOT localized on purpose.
+// Instead, use base::MessageFormatter for a complex message with numbers
+// (integer, float, double) embedded or base::Format{Number,Double,Percent} to
+// just format a single number/percent. Note that some languages use native
+// digits instead of ASCII digits while others use a group separator or decimal
+// point different from ',' and '.'. Using these functions in the UI would lead
+// numbers to be formatted in a non-native way.
+// ----------------------------------------------------------------------------
+
+namespace base {
+
+// Number -> string conversions ------------------------------------------------
+
+// Ignores locale! see warning above.
+BASE_EXPORT std::string NumberToString(int value);
+BASE_EXPORT string16 NumberToString16(int value);
+BASE_EXPORT std::string NumberToString(unsigned int value);
+BASE_EXPORT string16 NumberToString16(unsigned int value);
+BASE_EXPORT std::string NumberToString(long value);
+BASE_EXPORT string16 NumberToString16(long value);
+BASE_EXPORT std::string NumberToString(unsigned long value);
+BASE_EXPORT string16 NumberToString16(unsigned long value);
+BASE_EXPORT std::string NumberToString(long long value);
+BASE_EXPORT string16 NumberToString16(long long value);
+BASE_EXPORT std::string NumberToString(unsigned long long value);
+BASE_EXPORT string16 NumberToString16(unsigned long long value);
+BASE_EXPORT std::string NumberToString(double value);
+BASE_EXPORT string16 NumberToString16(double value);
+
+// String -> number conversions ------------------------------------------------
+
+// Perform a best-effort conversion of the input string to a numeric type,
+// setting |*output| to the result of the conversion. Returns true for
+// "perfect" conversions; returns false in the following cases:
+// - Overflow. |*output| will be set to the maximum value supported
+// by the data type.
+// - Underflow. |*output| will be set to the minimum value supported
+// by the data type.
+// - Trailing characters in the string after parsing the number. |*output|
+// will be set to the value of the number that was parsed.
+// - Leading whitespace in the string before parsing the number. |*output| will
+// be set to the value of the number that was parsed.
+// - No characters parseable as a number at the beginning of the string.
+// |*output| will be set to 0.
+// - Empty string. |*output| will be set to 0.
+// WARNING: Will write to |output| even when returning false.
+// Read the comments above carefully.
+BASE_EXPORT bool StringToInt(StringPiece input, int* output);
+BASE_EXPORT bool StringToInt(StringPiece16 input, int* output);
+
+BASE_EXPORT bool StringToUint(StringPiece input, unsigned* output);
+BASE_EXPORT bool StringToUint(StringPiece16 input, unsigned* output);
+
+BASE_EXPORT bool StringToInt64(StringPiece input, int64_t* output);
+BASE_EXPORT bool StringToInt64(StringPiece16 input, int64_t* output);
+
+BASE_EXPORT bool StringToUint64(StringPiece input, uint64_t* output);
+BASE_EXPORT bool StringToUint64(StringPiece16 input, uint64_t* output);
+
+BASE_EXPORT bool StringToSizeT(StringPiece input, size_t* output);
+BASE_EXPORT bool StringToSizeT(StringPiece16 input, size_t* output);
+
+// For floating-point conversions, only conversions of input strings in decimal
+// form are defined to work. Behavior with strings representing floating-point
+// numbers in hexadecimal, and strings representing non-finite values (such as
+// NaN and inf) is undefined. Otherwise, these behave the same as the integral
+// variants. This expects the input string to NOT be specific to the locale.
+// If your input is locale specific, use ICU to read the number.
+// WARNING: Will write to |output| even when returning false.
+// Read the comments here and above StringToInt() carefully.
+BASE_EXPORT bool StringToDouble(StringPiece input, double* output);
+BASE_EXPORT bool StringToDouble(StringPiece16 input, double* output);
+
+// Hex encoding ----------------------------------------------------------------
+
+// Returns a hex string representation of a binary buffer. The returned hex
+// string will be in upper case. This function does not check if |size| is
+// within reasonable limits since it's written with trusted data in mind. If
+// you suspect that the data you want to format might be large, the absolute
+// max size for |size| should be
+// std::numeric_limits<size_t>::max() / 2
+BASE_EXPORT std::string HexEncode(const void* bytes, size_t size);
+BASE_EXPORT std::string HexEncode(base::span<const uint8_t> bytes);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that will fit into |output|, i.e.
+// -0x80000000 < |input| < 0x7FFFFFFF.
+BASE_EXPORT bool HexStringToInt(StringPiece input, int* output);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that will fit into |output|, i.e.
+// 0x00000000 < |input| < 0xFFFFFFFF.
+// The string is not required to start with 0x.
+BASE_EXPORT bool HexStringToUInt(StringPiece input, uint32_t* output);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that will fit into |output|, i.e.
+// -0x8000000000000000 < |input| < 0x7FFFFFFFFFFFFFFF.
+BASE_EXPORT bool HexStringToInt64(StringPiece input, int64_t* output);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that will fit into |output|, i.e.
+// 0x0000000000000000 < |input| < 0xFFFFFFFFFFFFFFFF.
+// The string is not required to start with 0x.
+BASE_EXPORT bool HexStringToUInt64(StringPiece input, uint64_t* output);
+
+// Similar to the previous functions, except that output is a vector of bytes.
+// |*output| will contain as many bytes as were successfully parsed prior to the
+// error. There is no overflow, but input.size() must be evenly divisible by 2.
+// Leading 0x or +/- are not allowed.
+BASE_EXPORT bool HexStringToBytes(StringPiece input,
+ std::vector<uint8_t>* output);
+
+// Same as HexStringToBytes, but for an std::string.
+BASE_EXPORT bool HexStringToString(StringPiece input, std::string* output);
+
+// Decodes the hex string |input| into a presized |output|. The output buffer
+// must be sized exactly to |input.size() / 2| or decoding will fail and no
+// bytes will be written to |output|. Decoding an empty input is also
+// considered a failure. When decoding fails due to encountering invalid input
+// characters, |output| will have been filled with the decoded bytes up until
+// the failure.
+BASE_EXPORT bool HexStringToSpan(StringPiece input, base::span<uint8_t> output);
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_NUMBER_CONVERSIONS_H_
diff --git a/security/sandbox/chromium/base/strings/string_piece.cc b/security/sandbox/chromium/base/strings/string_piece.cc
new file mode 100644
index 0000000000..d743144a4e
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_piece.cc
@@ -0,0 +1,426 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Copied from strings/stringpiece.cc with modifications
+
+#include "base/strings/string_piece.h"
+
+#include <limits.h>
+
+#include <algorithm>
+#include <ostream>
+
+#include "base/logging.h"
+#include "base/strings/utf_string_conversions.h"
+
+namespace base {
+namespace {
+
+// For each character in characters_wanted, sets the index corresponding
+// to the ASCII code of that character to 1 in table. This is used by
+// the find_.*_of methods below to tell whether or not a character is in
+// the lookup table in constant time.
+// The argument `table' must be an array that is large enough to hold all
+// the possible values of an unsigned char. Thus it should be declared
+// as follows:
+// bool table[UCHAR_MAX + 1]
+inline void BuildLookupTable(const StringPiece& characters_wanted,
+ bool* table) {
+ const size_t length = characters_wanted.length();
+ const char* const data = characters_wanted.data();
+ for (size_t i = 0; i < length; ++i) {
+ table[static_cast<unsigned char>(data[i])] = true;
+ }
+}
+
+} // namespace
+
+// MSVC doesn't like complex extern templates and DLLs.
+#if !defined(COMPILER_MSVC)
+template class BasicStringPiece<std::string>;
+template class BasicStringPiece<string16>;
+#endif
+
+std::ostream& operator<<(std::ostream& o, const StringPiece& piece) {
+ o.write(piece.data(), static_cast<std::streamsize>(piece.size()));
+ return o;
+}
+
+std::ostream& operator<<(std::ostream& o, const StringPiece16& piece) {
+ return o << UTF16ToUTF8(piece);
+}
+
+namespace internal {
+
+template<typename STR>
+void AppendToStringT(const BasicStringPiece<STR>& self, STR* target) {
+ if (!self.empty())
+ target->append(self.data(), self.size());
+}
+
+void AppendToString(const StringPiece& self, std::string* target) {
+ AppendToStringT(self, target);
+}
+
+void AppendToString(const StringPiece16& self, string16* target) {
+ AppendToStringT(self, target);
+}
+
+template<typename STR>
+size_t copyT(const BasicStringPiece<STR>& self,
+ typename STR::value_type* buf,
+ size_t n,
+ size_t pos) {
+ size_t ret = std::min(self.size() - pos, n);
+ memcpy(buf, self.data() + pos, ret * sizeof(typename STR::value_type));
+ return ret;
+}
+
+size_t copy(const StringPiece& self, char* buf, size_t n, size_t pos) {
+ return copyT(self, buf, n, pos);
+}
+
+size_t copy(const StringPiece16& self, char16* buf, size_t n, size_t pos) {
+ return copyT(self, buf, n, pos);
+}
+
+template<typename STR>
+size_t findT(const BasicStringPiece<STR>& self,
+ const BasicStringPiece<STR>& s,
+ size_t pos) {
+ if (pos > self.size())
+ return BasicStringPiece<STR>::npos;
+
+ typename BasicStringPiece<STR>::const_iterator result =
+ std::search(self.begin() + pos, self.end(), s.begin(), s.end());
+ const size_t xpos =
+ static_cast<size_t>(result - self.begin());
+ return xpos + s.size() <= self.size() ? xpos : BasicStringPiece<STR>::npos;
+}
+
+size_t find(const StringPiece& self, const StringPiece& s, size_t pos) {
+ return findT(self, s, pos);
+}
+
+size_t find(const StringPiece16& self, const StringPiece16& s, size_t pos) {
+ return findT(self, s, pos);
+}
+
+template<typename STR>
+size_t findT(const BasicStringPiece<STR>& self,
+ typename STR::value_type c,
+ size_t pos) {
+ if (pos >= self.size())
+ return BasicStringPiece<STR>::npos;
+
+ typename BasicStringPiece<STR>::const_iterator result =
+ std::find(self.begin() + pos, self.end(), c);
+ return result != self.end() ?
+ static_cast<size_t>(result - self.begin()) : BasicStringPiece<STR>::npos;
+}
+
+size_t find(const StringPiece& self, char c, size_t pos) {
+ return findT(self, c, pos);
+}
+
+size_t find(const StringPiece16& self, char16 c, size_t pos) {
+ return findT(self, c, pos);
+}
+
+template<typename STR>
+size_t rfindT(const BasicStringPiece<STR>& self,
+ const BasicStringPiece<STR>& s,
+ size_t pos) {
+ if (self.size() < s.size())
+ return BasicStringPiece<STR>::npos;
+
+ if (s.empty())
+ return std::min(self.size(), pos);
+
+ typename BasicStringPiece<STR>::const_iterator last =
+ self.begin() + std::min(self.size() - s.size(), pos) + s.size();
+ typename BasicStringPiece<STR>::const_iterator result =
+ std::find_end(self.begin(), last, s.begin(), s.end());
+ return result != last ?
+ static_cast<size_t>(result - self.begin()) : BasicStringPiece<STR>::npos;
+}
+
+size_t rfind(const StringPiece& self, const StringPiece& s, size_t pos) {
+ return rfindT(self, s, pos);
+}
+
+size_t rfind(const StringPiece16& self, const StringPiece16& s, size_t pos) {
+ return rfindT(self, s, pos);
+}
+
+template<typename STR>
+size_t rfindT(const BasicStringPiece<STR>& self,
+ typename STR::value_type c,
+ size_t pos) {
+ if (self.size() == 0)
+ return BasicStringPiece<STR>::npos;
+
+ for (size_t i = std::min(pos, self.size() - 1); ;
+ --i) {
+ if (self.data()[i] == c)
+ return i;
+ if (i == 0)
+ break;
+ }
+ return BasicStringPiece<STR>::npos;
+}
+
+size_t rfind(const StringPiece& self, char c, size_t pos) {
+ return rfindT(self, c, pos);
+}
+
+size_t rfind(const StringPiece16& self, char16 c, size_t pos) {
+ return rfindT(self, c, pos);
+}
+
+// 8-bit version using lookup table.
+size_t find_first_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos) {
+ if (self.size() == 0 || s.size() == 0)
+ return StringPiece::npos;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.size() == 1)
+ return find(self, s.data()[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (size_t i = pos; i < self.size(); ++i) {
+ if (lookup[static_cast<unsigned char>(self.data()[i])]) {
+ return i;
+ }
+ }
+ return StringPiece::npos;
+}
+
+// 16-bit brute force version.
+size_t find_first_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos) {
+ // Use the faster std::find() if searching for a single character.
+ StringPiece16::const_iterator found =
+ s.size() == 1 ? std::find(self.begin() + pos, self.end(), s[0])
+ : std::find_first_of(self.begin() + pos, self.end(),
+ s.begin(), s.end());
+ if (found == self.end())
+ return StringPiece16::npos;
+ return found - self.begin();
+}
+
+// 8-bit version using lookup table.
+size_t find_first_not_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos) {
+ if (self.size() == 0)
+ return StringPiece::npos;
+
+ if (s.size() == 0)
+ return 0;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.size() == 1)
+ return find_first_not_of(self, s.data()[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (size_t i = pos; i < self.size(); ++i) {
+ if (!lookup[static_cast<unsigned char>(self.data()[i])]) {
+ return i;
+ }
+ }
+ return StringPiece::npos;
+}
+
+// 16-bit brute-force version.
+BASE_EXPORT size_t find_first_not_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos) {
+ if (self.size() == 0)
+ return StringPiece16::npos;
+
+ for (size_t self_i = pos; self_i < self.size(); ++self_i) {
+ bool found = false;
+ for (auto c : s) {
+ if (self[self_i] == c) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return self_i;
+ }
+ return StringPiece16::npos;
+}
+
+template<typename STR>
+size_t find_first_not_ofT(const BasicStringPiece<STR>& self,
+ typename STR::value_type c,
+ size_t pos) {
+ if (self.size() == 0)
+ return BasicStringPiece<STR>::npos;
+
+ for (; pos < self.size(); ++pos) {
+ if (self.data()[pos] != c) {
+ return pos;
+ }
+ }
+ return BasicStringPiece<STR>::npos;
+}
+
+size_t find_first_not_of(const StringPiece& self,
+ char c,
+ size_t pos) {
+ return find_first_not_ofT(self, c, pos);
+}
+
+size_t find_first_not_of(const StringPiece16& self,
+ char16 c,
+ size_t pos) {
+ return find_first_not_ofT(self, c, pos);
+}
+
+// 8-bit version using lookup table.
+size_t find_last_of(const StringPiece& self, const StringPiece& s, size_t pos) {
+ if (self.size() == 0 || s.size() == 0)
+ return StringPiece::npos;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.size() == 1)
+ return rfind(self, s.data()[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (size_t i = std::min(pos, self.size() - 1); ; --i) {
+ if (lookup[static_cast<unsigned char>(self.data()[i])])
+ return i;
+ if (i == 0)
+ break;
+ }
+ return StringPiece::npos;
+}
+
+// 16-bit brute-force version.
+size_t find_last_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos) {
+ if (self.size() == 0)
+ return StringPiece16::npos;
+
+ for (size_t self_i = std::min(pos, self.size() - 1); ;
+ --self_i) {
+ for (auto c : s) {
+ if (self.data()[self_i] == c)
+ return self_i;
+ }
+ if (self_i == 0)
+ break;
+ }
+ return StringPiece16::npos;
+}
+
+// 8-bit version using lookup table.
+size_t find_last_not_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos) {
+ if (self.size() == 0)
+ return StringPiece::npos;
+
+ size_t i = std::min(pos, self.size() - 1);
+ if (s.size() == 0)
+ return i;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.size() == 1)
+ return find_last_not_of(self, s.data()[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (; ; --i) {
+ if (!lookup[static_cast<unsigned char>(self.data()[i])])
+ return i;
+ if (i == 0)
+ break;
+ }
+ return StringPiece::npos;
+}
+
+// 16-bit brute-force version.
+size_t find_last_not_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos) {
+ if (self.size() == 0)
+ return StringPiece::npos;
+
+ for (size_t self_i = std::min(pos, self.size() - 1); ; --self_i) {
+ bool found = false;
+ for (auto c : s) {
+ if (self.data()[self_i] == c) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return self_i;
+ if (self_i == 0)
+ break;
+ }
+ return StringPiece16::npos;
+}
+
+template<typename STR>
+size_t find_last_not_ofT(const BasicStringPiece<STR>& self,
+ typename STR::value_type c,
+ size_t pos) {
+ if (self.size() == 0)
+ return BasicStringPiece<STR>::npos;
+
+ for (size_t i = std::min(pos, self.size() - 1); ; --i) {
+ if (self.data()[i] != c)
+ return i;
+ if (i == 0)
+ break;
+ }
+ return BasicStringPiece<STR>::npos;
+}
+
+size_t find_last_not_of(const StringPiece& self,
+ char c,
+ size_t pos) {
+ return find_last_not_ofT(self, c, pos);
+}
+
+size_t find_last_not_of(const StringPiece16& self,
+ char16 c,
+ size_t pos) {
+ return find_last_not_ofT(self, c, pos);
+}
+
+template<typename STR>
+BasicStringPiece<STR> substrT(const BasicStringPiece<STR>& self,
+ size_t pos,
+ size_t n) {
+ if (pos > self.size()) pos = self.size();
+ if (n > self.size() - pos) n = self.size() - pos;
+ return BasicStringPiece<STR>(self.data() + pos, n);
+}
+
+StringPiece substr(const StringPiece& self,
+ size_t pos,
+ size_t n) {
+ return substrT(self, pos, n);
+}
+
+StringPiece16 substr(const StringPiece16& self,
+ size_t pos,
+ size_t n) {
+ return substrT(self, pos, n);
+}
+
+} // namespace internal
+} // namespace base
diff --git a/security/sandbox/chromium/base/strings/string_piece.h b/security/sandbox/chromium/base/strings/string_piece.h
new file mode 100644
index 0000000000..536bf3f023
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_piece.h
@@ -0,0 +1,513 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Copied from strings/stringpiece.h with modifications
+//
+// A string-like object that points to a sized piece of memory.
+//
+// You can use StringPiece as a function or method parameter. A StringPiece
+// parameter can receive a double-quoted string literal argument, a "const
+// char*" argument, a string argument, or a StringPiece argument with no data
+// copying. Systematic use of StringPiece for arguments reduces data
+// copies and strlen() calls.
+//
+// Prefer passing StringPieces by value:
+// void MyFunction(StringPiece arg);
+// If circumstances require, you may also pass by const reference:
+// void MyFunction(const StringPiece& arg); // not preferred
+// Both of these have the same lifetime semantics. Passing by value
+// generates slightly smaller code. For more discussion, Googlers can see
+// the thread go/stringpiecebyvalue on c-users.
+
+#ifndef BASE_STRINGS_STRING_PIECE_H_
+#define BASE_STRINGS_STRING_PIECE_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <string>
+#include <type_traits>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/strings/char_traits.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece_forward.h"
+
+namespace base {
+
+// internal --------------------------------------------------------------------
+
+// Many of the StringPiece functions use different implementations for the
+// 8-bit and 16-bit versions, and we don't want lots of template expansions in
+// this (very common) header that will slow down compilation.
+//
+// So here we define overloaded functions called by the StringPiece template.
+// For those that share an implementation, the two versions will expand to a
+// template internal to the .cc file.
+namespace internal {
+
+BASE_EXPORT void AppendToString(const StringPiece& self, std::string* target);
+BASE_EXPORT void AppendToString(const StringPiece16& self, string16* target);
+
+BASE_EXPORT size_t copy(const StringPiece& self,
+ char* buf,
+ size_t n,
+ size_t pos);
+BASE_EXPORT size_t copy(const StringPiece16& self,
+ char16* buf,
+ size_t n,
+ size_t pos);
+
+BASE_EXPORT size_t find(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t find(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+BASE_EXPORT size_t find(const StringPiece& self,
+ char c,
+ size_t pos);
+BASE_EXPORT size_t find(const StringPiece16& self,
+ char16 c,
+ size_t pos);
+
+BASE_EXPORT size_t rfind(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t rfind(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+BASE_EXPORT size_t rfind(const StringPiece& self,
+ char c,
+ size_t pos);
+BASE_EXPORT size_t rfind(const StringPiece16& self,
+ char16 c,
+ size_t pos);
+
+BASE_EXPORT size_t find_first_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t find_first_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+
+BASE_EXPORT size_t find_first_not_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t find_first_not_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+BASE_EXPORT size_t find_first_not_of(const StringPiece& self,
+ char c,
+ size_t pos);
+BASE_EXPORT size_t find_first_not_of(const StringPiece16& self,
+ char16 c,
+ size_t pos);
+
+BASE_EXPORT size_t find_last_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t find_last_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+BASE_EXPORT size_t find_last_of(const StringPiece& self,
+ char c,
+ size_t pos);
+BASE_EXPORT size_t find_last_of(const StringPiece16& self,
+ char16 c,
+ size_t pos);
+
+BASE_EXPORT size_t find_last_not_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t find_last_not_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+BASE_EXPORT size_t find_last_not_of(const StringPiece16& self,
+ char16 c,
+ size_t pos);
+BASE_EXPORT size_t find_last_not_of(const StringPiece& self,
+ char c,
+ size_t pos);
+
+BASE_EXPORT StringPiece substr(const StringPiece& self,
+ size_t pos,
+ size_t n);
+BASE_EXPORT StringPiece16 substr(const StringPiece16& self,
+ size_t pos,
+ size_t n);
+
+} // namespace internal
+
+// BasicStringPiece ------------------------------------------------------------
+
+// Defines the types, methods, operators, and data members common to both
+// StringPiece and StringPiece16.
+//
+// This is templatized by string class type rather than character type, so
+// BasicStringPiece<std::string> or BasicStringPiece<base::string16>.
+template <typename STRING_TYPE> class BasicStringPiece {
+ public:
+ // Standard STL container boilerplate.
+ typedef size_t size_type;
+ typedef typename STRING_TYPE::value_type value_type;
+ typedef const value_type* pointer;
+ typedef const value_type& reference;
+ typedef const value_type& const_reference;
+ typedef ptrdiff_t difference_type;
+ typedef const value_type* const_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ static const size_type npos;
+
+ public:
+ // We provide non-explicit singleton constructors so users can pass
+ // in a "const char*" or a "string" wherever a "StringPiece" is
+ // expected (likewise for char16, string16, StringPiece16).
+ constexpr BasicStringPiece() : ptr_(NULL), length_(0) {}
+ // TODO(dcheng): Construction from nullptr is not allowed for
+ // std::basic_string_view, so remove the special handling for it.
+ // Note: This doesn't just use STRING_TYPE::traits_type::length(), since that
+ // isn't constexpr until C++17.
+ constexpr BasicStringPiece(const value_type* str)
+ : ptr_(str), length_(!str ? 0 : CharTraits<value_type>::length(str)) {}
+ BasicStringPiece(const STRING_TYPE& str)
+ : ptr_(str.data()), length_(str.size()) {}
+ constexpr BasicStringPiece(const value_type* offset, size_type len)
+ : ptr_(offset), length_(len) {}
+ BasicStringPiece(const typename STRING_TYPE::const_iterator& begin,
+ const typename STRING_TYPE::const_iterator& end) {
+ DCHECK(begin <= end) << "StringPiece iterators swapped or invalid.";
+ length_ = static_cast<size_t>(std::distance(begin, end));
+
+ // The length test before assignment is to avoid dereferencing an iterator
+ // that may point to the end() of a string.
+ ptr_ = length_ > 0 ? &*begin : nullptr;
+ }
+
+ // data() may return a pointer to a buffer with embedded NULs, and the
+ // returned buffer may or may not be null terminated. Therefore it is
+ // typically a mistake to pass data() to a routine that expects a NUL
+ // terminated string.
+ constexpr const value_type* data() const { return ptr_; }
+ constexpr size_type size() const noexcept { return length_; }
+ constexpr size_type length() const noexcept { return length_; }
+ bool empty() const { return length_ == 0; }
+
+ constexpr value_type operator[](size_type i) const {
+ CHECK(i < length_);
+ return ptr_[i];
+ }
+
+ value_type front() const {
+ CHECK_NE(0UL, length_);
+ return ptr_[0];
+ }
+
+ value_type back() const {
+ CHECK_NE(0UL, length_);
+ return ptr_[length_ - 1];
+ }
+
+ constexpr void remove_prefix(size_type n) {
+ CHECK(n <= length_);
+ ptr_ += n;
+ length_ -= n;
+ }
+
+ constexpr void remove_suffix(size_type n) {
+ CHECK(n <= length_);
+ length_ -= n;
+ }
+
+ constexpr int compare(BasicStringPiece x) const noexcept {
+ int r = CharTraits<value_type>::compare(
+ ptr_, x.ptr_, (length_ < x.length_ ? length_ : x.length_));
+ if (r == 0) {
+ if (length_ < x.length_) r = -1;
+ else if (length_ > x.length_) r = +1;
+ }
+ return r;
+ }
+
+ // This is the style of conversion preferred by std::string_view in C++17.
+ explicit operator STRING_TYPE() const { return as_string(); }
+
+ STRING_TYPE as_string() const {
+ // std::string doesn't like to take a NULL pointer even with a 0 size.
+ return empty() ? STRING_TYPE() : STRING_TYPE(data(), size());
+ }
+
+ const_iterator begin() const { return ptr_; }
+ const_iterator end() const { return ptr_ + length_; }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(ptr_ + length_);
+ }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(ptr_);
+ }
+
+ size_type max_size() const { return length_; }
+ size_type capacity() const { return length_; }
+
+ void AppendToString(STRING_TYPE* target) const {
+ internal::AppendToString(*this, target);
+ }
+
+ size_type copy(value_type* buf, size_type n, size_type pos = 0) const {
+ return internal::copy(*this, buf, n, pos);
+ }
+
+ // Does "this" start with "x"
+ constexpr bool starts_with(BasicStringPiece x) const noexcept {
+ return (
+ (this->length_ >= x.length_) &&
+ (CharTraits<value_type>::compare(this->ptr_, x.ptr_, x.length_) == 0));
+ }
+
+ // Does "this" end with "x"
+ constexpr bool ends_with(BasicStringPiece x) const noexcept {
+ return ((this->length_ >= x.length_) &&
+ (CharTraits<value_type>::compare(
+ this->ptr_ + (this->length_ - x.length_), x.ptr_, x.length_) ==
+ 0));
+ }
+
+ // find: Search for a character or substring at a given offset.
+ size_type find(const BasicStringPiece<STRING_TYPE>& s,
+ size_type pos = 0) const {
+ return internal::find(*this, s, pos);
+ }
+ size_type find(value_type c, size_type pos = 0) const {
+ return internal::find(*this, c, pos);
+ }
+
+ // rfind: Reverse find.
+ size_type rfind(const BasicStringPiece& s,
+ size_type pos = BasicStringPiece::npos) const {
+ return internal::rfind(*this, s, pos);
+ }
+ size_type rfind(value_type c, size_type pos = BasicStringPiece::npos) const {
+ return internal::rfind(*this, c, pos);
+ }
+
+  // find_first_of: Find the first occurrence of one of a set of characters.
+ size_type find_first_of(const BasicStringPiece& s,
+ size_type pos = 0) const {
+ return internal::find_first_of(*this, s, pos);
+ }
+ size_type find_first_of(value_type c, size_type pos = 0) const {
+ return find(c, pos);
+ }
+
+  // find_first_not_of: Find the first occurrence not of a set of characters.
+ size_type find_first_not_of(const BasicStringPiece& s,
+ size_type pos = 0) const {
+ return internal::find_first_not_of(*this, s, pos);
+ }
+ size_type find_first_not_of(value_type c, size_type pos = 0) const {
+ return internal::find_first_not_of(*this, c, pos);
+ }
+
+  // find_last_of: Find the last occurrence of one of a set of characters.
+ size_type find_last_of(const BasicStringPiece& s,
+ size_type pos = BasicStringPiece::npos) const {
+ return internal::find_last_of(*this, s, pos);
+ }
+ size_type find_last_of(value_type c,
+ size_type pos = BasicStringPiece::npos) const {
+ return rfind(c, pos);
+ }
+
+  // find_last_not_of: Find the last occurrence not of a set of characters.
+ size_type find_last_not_of(const BasicStringPiece& s,
+ size_type pos = BasicStringPiece::npos) const {
+ return internal::find_last_not_of(*this, s, pos);
+ }
+ size_type find_last_not_of(value_type c,
+ size_type pos = BasicStringPiece::npos) const {
+ return internal::find_last_not_of(*this, c, pos);
+ }
+
+ // substr.
+ BasicStringPiece substr(size_type pos,
+ size_type n = BasicStringPiece::npos) const {
+ return internal::substr(*this, pos, n);
+ }
+
+ protected:
+ const value_type* ptr_;
+ size_type length_;
+};
+
+template <typename STRING_TYPE>
+const typename BasicStringPiece<STRING_TYPE>::size_type
+BasicStringPiece<STRING_TYPE>::npos =
+ typename BasicStringPiece<STRING_TYPE>::size_type(-1);
+
+// MSVC doesn't like complex extern templates and DLLs.
+#if !defined(COMPILER_MSVC)
+extern template class BASE_EXPORT BasicStringPiece<std::string>;
+extern template class BASE_EXPORT BasicStringPiece<string16>;
+#endif
+
+// Comparison operators --------------------------------------------------------
+// operator ==
+template <typename StringT>
+constexpr bool operator==(BasicStringPiece<StringT> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return lhs.size() == rhs.size() && lhs.compare(rhs) == 0;
+}
+
+// Here and below we make use of std::common_type_t to emulate an identity type
+// transformation. This creates a non-deduced context, so that we can compare
+// StringPieces with types that implicitly convert to StringPieces. See
+// https://wg21.link/n3766 for details.
+// Furthermore, we require dummy template parameters for these overloads to work
+// around a name mangling issue on Windows.
+template <typename StringT, int = 1>
+constexpr bool operator==(
+ BasicStringPiece<StringT> lhs,
+ std::common_type_t<BasicStringPiece<StringT>> rhs) noexcept {
+ return lhs.size() == rhs.size() && lhs.compare(rhs) == 0;
+}
+
+template <typename StringT, int = 2>
+constexpr bool operator==(std::common_type_t<BasicStringPiece<StringT>> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return lhs.size() == rhs.size() && lhs.compare(rhs) == 0;
+}
+
+// operator !=
+template <typename StringT>
+constexpr bool operator!=(BasicStringPiece<StringT> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return !(lhs == rhs);
+}
+
+template <typename StringT, int = 1>
+constexpr bool operator!=(
+ BasicStringPiece<StringT> lhs,
+ std::common_type_t<BasicStringPiece<StringT>> rhs) noexcept {
+ return !(lhs == rhs);
+}
+
+template <typename StringT, int = 2>
+constexpr bool operator!=(std::common_type_t<BasicStringPiece<StringT>> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return !(lhs == rhs);
+}
+
+// operator <
+template <typename StringT>
+constexpr bool operator<(BasicStringPiece<StringT> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return lhs.compare(rhs) < 0;
+}
+
+template <typename StringT, int = 1>
+constexpr bool operator<(
+ BasicStringPiece<StringT> lhs,
+ std::common_type_t<BasicStringPiece<StringT>> rhs) noexcept {
+ return lhs.compare(rhs) < 0;
+}
+
+template <typename StringT, int = 2>
+constexpr bool operator<(std::common_type_t<BasicStringPiece<StringT>> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return lhs.compare(rhs) < 0;
+}
+
+// operator >
+template <typename StringT>
+constexpr bool operator>(BasicStringPiece<StringT> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return rhs < lhs;
+}
+
+template <typename StringT, int = 1>
+constexpr bool operator>(
+ BasicStringPiece<StringT> lhs,
+ std::common_type_t<BasicStringPiece<StringT>> rhs) noexcept {
+ return rhs < lhs;
+}
+
+template <typename StringT, int = 2>
+constexpr bool operator>(std::common_type_t<BasicStringPiece<StringT>> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return rhs < lhs;
+}
+
+// operator <=
+template <typename StringT>
+constexpr bool operator<=(BasicStringPiece<StringT> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return !(rhs < lhs);
+}
+
+template <typename StringT, int = 1>
+constexpr bool operator<=(
+ BasicStringPiece<StringT> lhs,
+ std::common_type_t<BasicStringPiece<StringT>> rhs) noexcept {
+ return !(rhs < lhs);
+}
+
+template <typename StringT, int = 2>
+constexpr bool operator<=(std::common_type_t<BasicStringPiece<StringT>> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return !(rhs < lhs);
+}
+
+// operator >=
+template <typename StringT>
+constexpr bool operator>=(BasicStringPiece<StringT> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return !(lhs < rhs);
+}
+
+template <typename StringT, int = 1>
+constexpr bool operator>=(
+ BasicStringPiece<StringT> lhs,
+ std::common_type_t<BasicStringPiece<StringT>> rhs) noexcept {
+ return !(lhs < rhs);
+}
+
+template <typename StringT, int = 2>
+constexpr bool operator>=(std::common_type_t<BasicStringPiece<StringT>> lhs,
+ BasicStringPiece<StringT> rhs) noexcept {
+ return !(lhs < rhs);
+}
+
+BASE_EXPORT std::ostream& operator<<(std::ostream& o,
+ const StringPiece& piece);
+
+BASE_EXPORT std::ostream& operator<<(std::ostream& o,
+ const StringPiece16& piece);
+
+// Hashing ---------------------------------------------------------------------
+
+// We provide appropriate hash functions so StringPiece and StringPiece16 can
+// be used as keys in hash sets and maps.
+
+// This hash function is copied from base/strings/string16.h. We don't use the
+// ones already defined for string and string16 directly because it would
+// require the string constructors to be called, which we don't want.
+
+template <typename StringPieceType>
+struct StringPieceHashImpl {
+ std::size_t operator()(StringPieceType sp) const {
+ std::size_t result = 0;
+ for (auto c : sp)
+ result = (result * 131) + c;
+ return result;
+ }
+};
+
+using StringPieceHash = StringPieceHashImpl<StringPiece>;
+using StringPiece16Hash = StringPieceHashImpl<StringPiece16>;
+using WStringPieceHash = StringPieceHashImpl<WStringPiece>;
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_PIECE_H_
diff --git a/security/sandbox/chromium/base/strings/string_piece_forward.h b/security/sandbox/chromium/base/strings/string_piece_forward.h
new file mode 100644
index 0000000000..b50b9806c9
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_piece_forward.h
@@ -0,0 +1,24 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Forward declaration of StringPiece types from base/strings/string_piece.h
+
+#ifndef BASE_STRINGS_STRING_PIECE_FORWARD_H_
+#define BASE_STRINGS_STRING_PIECE_FORWARD_H_
+
+#include <string>
+
+#include "base/strings/string16.h"
+
+namespace base {
+
+template <typename STRING_TYPE>
+class BasicStringPiece;
+typedef BasicStringPiece<std::string> StringPiece;
+typedef BasicStringPiece<string16> StringPiece16;
+typedef BasicStringPiece<std::wstring> WStringPiece;
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_PIECE_FORWARD_H_
diff --git a/security/sandbox/chromium/base/strings/string_split.cc b/security/sandbox/chromium/base/strings/string_split.cc
new file mode 100644
index 0000000000..42bc5e5b60
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_split.cc
@@ -0,0 +1,254 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_split.h"
+
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base {
+
+namespace {
+
+// Returns either the ASCII or UTF-16 whitespace.
+template<typename Str> BasicStringPiece<Str> WhitespaceForType();
+#if defined(OS_WIN) && defined(BASE_STRING16_IS_STD_U16STRING)
+template <>
+WStringPiece WhitespaceForType<std::wstring>() {
+ return kWhitespaceWide;
+}
+#endif
+
+template<> StringPiece16 WhitespaceForType<string16>() {
+ return kWhitespaceUTF16;
+}
+template<> StringPiece WhitespaceForType<std::string>() {
+ return kWhitespaceASCII;
+}
+
+// General string splitter template. Can take 8- or 16-bit input, can produce
+// the corresponding string or StringPiece output.
+template <typename OutputStringType, typename Str>
+static std::vector<OutputStringType> SplitStringT(
+ BasicStringPiece<Str> str,
+ BasicStringPiece<Str> delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ std::vector<OutputStringType> result;
+ if (str.empty())
+ return result;
+
+ size_t start = 0;
+ while (start != Str::npos) {
+ size_t end = str.find_first_of(delimiter, start);
+
+ BasicStringPiece<Str> piece;
+ if (end == Str::npos) {
+ piece = str.substr(start);
+ start = Str::npos;
+ } else {
+ piece = str.substr(start, end - start);
+ start = end + 1;
+ }
+
+ if (whitespace == TRIM_WHITESPACE)
+ piece = TrimString(piece, WhitespaceForType<Str>(), TRIM_ALL);
+
+ if (result_type == SPLIT_WANT_ALL || !piece.empty())
+ result.emplace_back(piece);
+ }
+ return result;
+}
+
+bool AppendStringKeyValue(StringPiece input,
+ char delimiter,
+ StringPairs* result) {
+ // Always append a new item regardless of success (it might be empty). The
+ // below code will copy the strings directly into the result pair.
+ result->resize(result->size() + 1);
+ auto& result_pair = result->back();
+
+ // Find the delimiter.
+ size_t end_key_pos = input.find_first_of(delimiter);
+ if (end_key_pos == std::string::npos) {
+ DVLOG(1) << "cannot find delimiter in: " << input;
+ return false; // No delimiter.
+ }
+ result_pair.first = std::string(input.substr(0, end_key_pos));
+
+ // Find the value string.
+ StringPiece remains = input.substr(end_key_pos, input.size() - end_key_pos);
+ size_t begin_value_pos = remains.find_first_not_of(delimiter);
+ if (begin_value_pos == StringPiece::npos) {
+ DVLOG(1) << "cannot parse value from input: " << input;
+ return false; // No value.
+ }
+
+ result_pair.second = std::string(
+ remains.substr(begin_value_pos, remains.size() - begin_value_pos));
+
+ return true;
+}
+
+template <typename OutputStringType, typename Str>
+std::vector<OutputStringType> SplitStringUsingSubstrT(
+ BasicStringPiece<Str> input,
+ BasicStringPiece<Str> delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ using Piece = BasicStringPiece<Str>;
+ using size_type = typename Piece::size_type;
+
+ std::vector<OutputStringType> result;
+ for (size_type begin_index = 0, end_index = 0; end_index != Piece::npos;
+ begin_index = end_index + delimiter.size()) {
+ end_index = input.find(delimiter, begin_index);
+ Piece term = end_index == Piece::npos
+ ? input.substr(begin_index)
+ : input.substr(begin_index, end_index - begin_index);
+
+ if (whitespace == TRIM_WHITESPACE)
+ term = TrimString(term, WhitespaceForType<Str>(), TRIM_ALL);
+
+ if (result_type == SPLIT_WANT_ALL || !term.empty())
+ result.emplace_back(term);
+ }
+
+ return result;
+}
+
+} // namespace
+
+std::vector<std::string> SplitString(StringPiece input,
+ StringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ return SplitStringT<std::string>(input, separators, whitespace, result_type);
+}
+
+std::vector<string16> SplitString(StringPiece16 input,
+ StringPiece16 separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ return SplitStringT<string16>(input, separators, whitespace, result_type);
+}
+
+std::vector<StringPiece> SplitStringPiece(StringPiece input,
+ StringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ return SplitStringT<StringPiece>(input, separators, whitespace, result_type);
+}
+
+std::vector<StringPiece16> SplitStringPiece(StringPiece16 input,
+ StringPiece16 separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ return SplitStringT<StringPiece16>(input, separators, whitespace,
+ result_type);
+}
+
+bool SplitStringIntoKeyValuePairs(StringPiece input,
+ char key_value_delimiter,
+ char key_value_pair_delimiter,
+ StringPairs* key_value_pairs) {
+ return SplitStringIntoKeyValuePairsUsingSubstr(
+ input, key_value_delimiter, StringPiece(&key_value_pair_delimiter, 1),
+ key_value_pairs);
+}
+
+bool SplitStringIntoKeyValuePairsUsingSubstr(
+ StringPiece input,
+ char key_value_delimiter,
+ StringPiece key_value_pair_delimiter,
+ StringPairs* key_value_pairs) {
+ key_value_pairs->clear();
+
+ std::vector<StringPiece> pairs = SplitStringPieceUsingSubstr(
+ input, key_value_pair_delimiter, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+ key_value_pairs->reserve(pairs.size());
+
+ bool success = true;
+ for (const StringPiece& pair : pairs) {
+ if (!AppendStringKeyValue(pair, key_value_delimiter, key_value_pairs)) {
+ // Don't return here, to allow for pairs without associated
+ // value or key; just record that the split failed.
+ success = false;
+ }
+ }
+ return success;
+}
+
+std::vector<string16> SplitStringUsingSubstr(StringPiece16 input,
+ StringPiece16 delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ return SplitStringUsingSubstrT<string16>(input, delimiter, whitespace,
+ result_type);
+}
+
+std::vector<std::string> SplitStringUsingSubstr(StringPiece input,
+ StringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ return SplitStringUsingSubstrT<std::string>(input, delimiter, whitespace,
+ result_type);
+}
+
+std::vector<StringPiece16> SplitStringPieceUsingSubstr(
+ StringPiece16 input,
+ StringPiece16 delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ std::vector<StringPiece16> result;
+ return SplitStringUsingSubstrT<StringPiece16>(input, delimiter, whitespace,
+ result_type);
+}
+
+std::vector<StringPiece> SplitStringPieceUsingSubstr(
+ StringPiece input,
+ StringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ return SplitStringUsingSubstrT<StringPiece>(input, delimiter, whitespace,
+ result_type);
+}
+
#if defined(OS_WIN) && defined(BASE_STRING16_IS_STD_U16STRING)
// Wide-string flavors, compiled only on Windows builds where string16 is
// std::u16string (so std::wstring needs its own overloads).
std::vector<std::wstring> SplitString(WStringPiece input,
                                      WStringPiece separators,
                                      WhitespaceHandling whitespace,
                                      SplitResult result_type) {
  return SplitStringT<std::wstring>(input, separators, whitespace,
                                    result_type);
}

std::vector<WStringPiece> SplitStringPiece(WStringPiece input,
                                           WStringPiece separators,
                                           WhitespaceHandling whitespace,
                                           SplitResult result_type) {
  return SplitStringT<WStringPiece>(input, separators, whitespace,
                                    result_type);
}

std::vector<std::wstring> SplitStringUsingSubstr(WStringPiece input,
                                                 WStringPiece delimiter,
                                                 WhitespaceHandling whitespace,
                                                 SplitResult result_type) {
  return SplitStringUsingSubstrT<std::wstring>(input, delimiter, whitespace,
                                               result_type);
}

std::vector<WStringPiece> SplitStringPieceUsingSubstr(
    WStringPiece input,
    WStringPiece delimiter,
    WhitespaceHandling whitespace,
    SplitResult result_type) {
  return SplitStringUsingSubstrT<WStringPiece>(input, delimiter, whitespace,
                                               result_type);
}
#endif
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/strings/string_split.h b/security/sandbox/chromium/base/strings/string_split.h
new file mode 100644
index 0000000000..efa8b199fe
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_split.h
@@ -0,0 +1,169 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_SPLIT_H_
+#define BASE_STRINGS_STRING_SPLIT_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+namespace base {
+
// Whether each resulting token is trimmed of whitespace before being
// returned.
enum WhitespaceHandling {
  KEEP_WHITESPACE,
  TRIM_WHITESPACE,
};

// Whether empty tokens may appear in the output.
enum SplitResult {
  // Strictly return all results: if the input is ",," and the separator is
  // ',' this yields a vector of three empty strings.
  SPLIT_WANT_ALL,

  // Only nonempty results are kept. Multiple separators are coalesced and
  // separators at either end of the input are ignored. Combined with
  // TRIM_WHITESPACE, whitespace-only results are dropped too, so ",," split
  // on ',' yields an empty vector.
  SPLIT_WANT_NONEMPTY,
};
+
+// Split the given string on ANY of the given separators, returning copies of
+// the result.
+//
+// Note this is inverse of JoinString() defined in string_util.h.
+//
+// To split on either commas or semicolons, keeping all whitespace:
+//
+// std::vector<std::string> tokens = base::SplitString(
+//     input, ",;", base::KEEP_WHITESPACE,
+//     base::SPLIT_WANT_ALL);
+BASE_EXPORT std::vector<std::string> SplitString(StringPiece input,
+ StringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type)
+ WARN_UNUSED_RESULT;
+BASE_EXPORT std::vector<string16> SplitString(StringPiece16 input,
+ StringPiece16 separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type)
+ WARN_UNUSED_RESULT;
+
+// Like SplitString above except it returns a vector of StringPieces which
+// reference the original buffer without copying. Although you have to be
+// careful to keep the original string unmodified, this provides an efficient
+// way to iterate through tokens in a string.
+//
+// Note this is inverse of JoinString() defined in string_util.h.
+//
+// To iterate through all whitespace-separated tokens in an input string:
+//
+// for (const auto& cur :
+// base::SplitStringPiece(input, base::kWhitespaceASCII,
+// base::KEEP_WHITESPACE,
+// base::SPLIT_WANT_NONEMPTY)) {
+// ...
+BASE_EXPORT std::vector<StringPiece> SplitStringPiece(
+ StringPiece input,
+ StringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+BASE_EXPORT std::vector<StringPiece16> SplitStringPiece(
+ StringPiece16 input,
+ StringPiece16 separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+
+using StringPairs = std::vector<std::pair<std::string, std::string>>;
+
+// Splits |line| into key value pairs according to the given delimiters and
+// removes whitespace leading each key and trailing each value. Returns true
+// only if each pair has a non-empty key and value. |key_value_pairs| will
+// include ("","") pairs for entries without |key_value_delimiter|.
+BASE_EXPORT bool SplitStringIntoKeyValuePairs(StringPiece input,
+ char key_value_delimiter,
+ char key_value_pair_delimiter,
+ StringPairs* key_value_pairs);
+
+// Similar to SplitStringIntoKeyValuePairs, but use a substring
+// |key_value_pair_delimiter| instead of a single char.
+BASE_EXPORT bool SplitStringIntoKeyValuePairsUsingSubstr(
+ StringPiece input,
+ char key_value_delimiter,
+ StringPiece key_value_pair_delimiter,
+ StringPairs* key_value_pairs);
+
+// Similar to SplitString, but use a substring delimiter instead of a list of
+// characters that are all possible delimiters.
+BASE_EXPORT std::vector<string16> SplitStringUsingSubstr(
+ StringPiece16 input,
+ StringPiece16 delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+BASE_EXPORT std::vector<std::string> SplitStringUsingSubstr(
+ StringPiece input,
+ StringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+
+// Like SplitStringUsingSubstr above except it returns a vector of StringPieces
+// which reference the original buffer without copying. Although you have to be
+// careful to keep the original string unmodified, this provides an efficient
+// way to iterate through tokens in a string.
+//
+// To iterate through all newline-separated tokens in an input string:
+//
+// for (const auto& cur :
+// base::SplitStringUsingSubstr(input, "\r\n",
+// base::KEEP_WHITESPACE,
+// base::SPLIT_WANT_NONEMPTY)) {
+// ...
+BASE_EXPORT std::vector<StringPiece16> SplitStringPieceUsingSubstr(
+ StringPiece16 input,
+ StringPiece16 delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+BASE_EXPORT std::vector<StringPiece> SplitStringPieceUsingSubstr(
+ StringPiece input,
+ StringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+
+#if defined(OS_WIN) && defined(BASE_STRING16_IS_STD_U16STRING)
+BASE_EXPORT std::vector<std::wstring> SplitString(WStringPiece input,
+ WStringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type)
+ WARN_UNUSED_RESULT;
+
+BASE_EXPORT std::vector<WStringPiece> SplitStringPiece(
+ WStringPiece input,
+ WStringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+
+BASE_EXPORT std::vector<std::wstring> SplitStringUsingSubstr(
+ WStringPiece input,
+ WStringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+
+BASE_EXPORT std::vector<WStringPiece> SplitStringPieceUsingSubstr(
+ WStringPiece input,
+ WStringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+#endif
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_SPLIT_H_
diff --git a/security/sandbox/chromium/base/strings/string_util.cc b/security/sandbox/chromium/base/strings/string_util.cc
new file mode 100644
index 0000000000..4e4be06e88
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_util.cc
@@ -0,0 +1,1057 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/no_destructor.h"
+#include "base/stl_util.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/third_party/icu/icu_utf.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Overloaded function to append one string onto the end of another. Having a
+// separate overload for |source| as both string and StringPiece allows for more
+// efficient usage from functions templated to work with either type (avoiding a
+// redundant call to the BasicStringPiece constructor in both cases).
+template <typename string_type>
+inline void AppendToString(string_type* target, const string_type& source) {
+ target->append(source);
+}
+
+template <typename string_type>
+inline void AppendToString(string_type* target,
+ const BasicStringPiece<string_type>& source) {
+ source.AppendToString(target);
+}
+
+// Assuming that a pointer is the size of a "machine word", then
+// uintptr_t is an integer type that is also a machine word.
+using MachineWord = uintptr_t;
+
+inline bool IsMachineWordAligned(const void* pointer) {
+ return !(reinterpret_cast<MachineWord>(pointer) & (sizeof(MachineWord) - 1));
+}
+
+template <typename CharacterType>
+struct NonASCIIMask;
+template <>
+struct NonASCIIMask<char> {
+ static constexpr MachineWord value() {
+ return static_cast<MachineWord>(0x8080808080808080ULL);
+ }
+};
+template <>
+struct NonASCIIMask<char16> {
+ static constexpr MachineWord value() {
+ return static_cast<MachineWord>(0xFF80FF80FF80FF80ULL);
+ }
+};
+#if defined(WCHAR_T_IS_UTF32)
+template <>
+struct NonASCIIMask<wchar_t> {
+ static constexpr MachineWord value() {
+ return static_cast<MachineWord>(0xFFFFFF80FFFFFF80ULL);
+ }
+};
+#endif // WCHAR_T_IS_UTF32
+
+} // namespace
+
// Scans a wprintf-style |format| string and reports whether every conversion
// it contains behaves identically across platforms. %s and %c are only
// portable with an 'l' length modifier, and %S/%C/%F/%D/%O/%U are rejected
// outright. A format that ends mid-specification is considered portable:
// it is equally broken on every platform.
bool IsWprintfFormatPortable(const wchar_t* format) {
  for (const wchar_t* p = format; *p != L'\0'; ++p) {
    if (*p != L'%')
      continue;

    bool saw_l_modifier = false;
    bool scanning_specification = true;
    while (scanning_specification) {
      ++p;
      if (*p == L'\0') {
        // Truncated specification; call it portable (see above).
        return true;
      }

      if (*p == L'l') {
        // 'l' is the only modifier that rescues the 's' and 'c' specifiers.
        saw_l_modifier = true;
      } else if (((*p == L's' || *p == L'c') && !saw_l_modifier) ||
                 *p == L'S' || *p == L'C' || *p == L'F' || *p == L'D' ||
                 *p == L'O' || *p == L'U') {
        return false;  // Not portable.
      }

      if (wcschr(L"diouxXeEfgGaAcspn%", *p)) {
        // Reached a known specifier; resume scanning the outer string.
        scanning_specification = false;
      }
    }
  }

  return true;
}
+
+namespace {
+
+template<typename StringType>
+StringType ToLowerASCIIImpl(BasicStringPiece<StringType> str) {
+ StringType ret;
+ ret.reserve(str.size());
+ for (size_t i = 0; i < str.size(); i++)
+ ret.push_back(ToLowerASCII(str[i]));
+ return ret;
+}
+
+template<typename StringType>
+StringType ToUpperASCIIImpl(BasicStringPiece<StringType> str) {
+ StringType ret;
+ ret.reserve(str.size());
+ for (size_t i = 0; i < str.size(); i++)
+ ret.push_back(ToUpperASCII(str[i]));
+ return ret;
+}
+
+} // namespace
+
+std::string ToLowerASCII(StringPiece str) {
+ return ToLowerASCIIImpl<std::string>(str);
+}
+
+string16 ToLowerASCII(StringPiece16 str) {
+ return ToLowerASCIIImpl<string16>(str);
+}
+
+std::string ToUpperASCII(StringPiece str) {
+ return ToUpperASCIIImpl<std::string>(str);
+}
+
+string16 ToUpperASCII(StringPiece16 str) {
+ return ToUpperASCIIImpl<string16>(str);
+}
+
+template<class StringType>
+int CompareCaseInsensitiveASCIIT(BasicStringPiece<StringType> a,
+ BasicStringPiece<StringType> b) {
+ // Find the first characters that aren't equal and compare them. If the end
+ // of one of the strings is found before a nonequal character, the lengths
+ // of the strings are compared.
+ size_t i = 0;
+ while (i < a.length() && i < b.length()) {
+ typename StringType::value_type lower_a = ToLowerASCII(a[i]);
+ typename StringType::value_type lower_b = ToLowerASCII(b[i]);
+ if (lower_a < lower_b)
+ return -1;
+ if (lower_a > lower_b)
+ return 1;
+ i++;
+ }
+
+ // End of one string hit before finding a different character. Expect the
+ // common case to be "strings equal" at this point so check that first.
+ if (a.length() == b.length())
+ return 0;
+
+ if (a.length() < b.length())
+ return -1;
+ return 1;
+}
+
+int CompareCaseInsensitiveASCII(StringPiece a, StringPiece b) {
+ return CompareCaseInsensitiveASCIIT<std::string>(a, b);
+}
+
+int CompareCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b) {
+ return CompareCaseInsensitiveASCIIT<string16>(a, b);
+}
+
+bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b) {
+ if (a.length() != b.length())
+ return false;
+ return CompareCaseInsensitiveASCIIT<std::string>(a, b) == 0;
+}
+
+bool EqualsCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b) {
+ if (a.length() != b.length())
+ return false;
+ return CompareCaseInsensitiveASCIIT<string16>(a, b) == 0;
+}
+
+const std::string& EmptyString() {
+ static const base::NoDestructor<std::string> s;
+ return *s;
+}
+
+const string16& EmptyString16() {
+ static const base::NoDestructor<string16> s16;
+ return *s16;
+}
+
+template <class StringType>
+bool ReplaceCharsT(const StringType& input,
+ BasicStringPiece<StringType> find_any_of_these,
+ BasicStringPiece<StringType> replace_with,
+ StringType* output);
+
+bool ReplaceChars(const string16& input,
+ StringPiece16 replace_chars,
+ StringPiece16 replace_with,
+ string16* output) {
+ return ReplaceCharsT(input, replace_chars, replace_with, output);
+}
+
+bool ReplaceChars(const std::string& input,
+ StringPiece replace_chars,
+ StringPiece replace_with,
+ std::string* output) {
+ return ReplaceCharsT(input, replace_chars, replace_with, output);
+}
+
+bool RemoveChars(const string16& input,
+ StringPiece16 remove_chars,
+ string16* output) {
+ return ReplaceCharsT(input, remove_chars, StringPiece16(), output);
+}
+
+bool RemoveChars(const std::string& input,
+ StringPiece remove_chars,
+ std::string* output) {
+ return ReplaceCharsT(input, remove_chars, StringPiece(), output);
+}
+
+template <typename Str>
+TrimPositions TrimStringT(BasicStringPiece<Str> input,
+ BasicStringPiece<Str> trim_chars,
+ TrimPositions positions,
+ Str* output) {
+ // Find the edges of leading/trailing whitespace as desired. Need to use
+ // a StringPiece version of input to be able to call find* on it with the
+ // StringPiece version of trim_chars (normally the trim_chars will be a
+ // constant so avoid making a copy).
+ const size_t last_char = input.length() - 1;
+ const size_t first_good_char =
+ (positions & TRIM_LEADING) ? input.find_first_not_of(trim_chars) : 0;
+ const size_t last_good_char = (positions & TRIM_TRAILING)
+ ? input.find_last_not_of(trim_chars)
+ : last_char;
+
+ // When the string was all trimmed, report that we stripped off characters
+ // from whichever position the caller was interested in. For empty input, we
+ // stripped no characters, but we still need to clear |output|.
+ if (input.empty() || first_good_char == Str::npos ||
+ last_good_char == Str::npos) {
+ bool input_was_empty = input.empty(); // in case output == &input
+ output->clear();
+ return input_was_empty ? TRIM_NONE : positions;
+ }
+
+ // Trim.
+ output->assign(input.data() + first_good_char,
+ last_good_char - first_good_char + 1);
+
+ // Return where we trimmed from.
+ return static_cast<TrimPositions>(
+ (first_good_char == 0 ? TRIM_NONE : TRIM_LEADING) |
+ (last_good_char == last_char ? TRIM_NONE : TRIM_TRAILING));
+}
+
+bool TrimString(StringPiece16 input,
+ StringPiece16 trim_chars,
+ string16* output) {
+ return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
+}
+
+bool TrimString(StringPiece input,
+ StringPiece trim_chars,
+ std::string* output) {
+ return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
+}
+
+template<typename Str>
+BasicStringPiece<Str> TrimStringPieceT(BasicStringPiece<Str> input,
+ BasicStringPiece<Str> trim_chars,
+ TrimPositions positions) {
+ size_t begin = (positions & TRIM_LEADING) ?
+ input.find_first_not_of(trim_chars) : 0;
+ size_t end = (positions & TRIM_TRAILING) ?
+ input.find_last_not_of(trim_chars) + 1 : input.size();
+ return input.substr(begin, end - begin);
+}
+
+StringPiece16 TrimString(StringPiece16 input,
+ StringPiece16 trim_chars,
+ TrimPositions positions) {
+ return TrimStringPieceT(input, trim_chars, positions);
+}
+
+StringPiece TrimString(StringPiece input,
+ StringPiece trim_chars,
+ TrimPositions positions) {
+ return TrimStringPieceT(input, trim_chars, positions);
+}
+
+void TruncateUTF8ToByteSize(const std::string& input,
+ const size_t byte_size,
+ std::string* output) {
+ DCHECK(output);
+ if (byte_size > input.length()) {
+ *output = input;
+ return;
+ }
+ DCHECK_LE(byte_size,
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()));
+ // Note: This cast is necessary because CBU8_NEXT uses int32_ts.
+ int32_t truncation_length = static_cast<int32_t>(byte_size);
+ int32_t char_index = truncation_length - 1;
+ const char* data = input.data();
+
+ // Using CBU8, we will move backwards from the truncation point
+ // to the beginning of the string looking for a valid UTF8
+ // character. Once a full UTF8 character is found, we will
+ // truncate the string to the end of that character.
+ while (char_index >= 0) {
+ int32_t prev = char_index;
+ base_icu::UChar32 code_point = 0;
+ CBU8_NEXT(data, char_index, truncation_length, code_point);
+ if (!IsValidCharacter(code_point) ||
+ !IsValidCodepoint(code_point)) {
+ char_index = prev - 1;
+ } else {
+ break;
+ }
+ }
+
+ if (char_index >= 0 )
+ *output = input.substr(0, char_index);
+ else
+ output->clear();
+}
+
+TrimPositions TrimWhitespace(StringPiece16 input,
+ TrimPositions positions,
+ string16* output) {
+ return TrimStringT(input, StringPiece16(kWhitespaceUTF16), positions, output);
+}
+
+StringPiece16 TrimWhitespace(StringPiece16 input,
+ TrimPositions positions) {
+ return TrimStringPieceT(input, StringPiece16(kWhitespaceUTF16), positions);
+}
+
+TrimPositions TrimWhitespaceASCII(StringPiece input,
+ TrimPositions positions,
+ std::string* output) {
+ return TrimStringT(input, StringPiece(kWhitespaceASCII), positions, output);
+}
+
+StringPiece TrimWhitespaceASCII(StringPiece input, TrimPositions positions) {
+ return TrimStringPieceT(input, StringPiece(kWhitespaceASCII), positions);
+}
+
+template<typename STR>
+STR CollapseWhitespaceT(const STR& text,
+ bool trim_sequences_with_line_breaks) {
+ STR result;
+ result.resize(text.size());
+
+ // Set flags to pretend we're already in a trimmed whitespace sequence, so we
+ // will trim any leading whitespace.
+ bool in_whitespace = true;
+ bool already_trimmed = true;
+
+ int chars_written = 0;
+ for (typename STR::const_iterator i(text.begin()); i != text.end(); ++i) {
+ if (IsUnicodeWhitespace(*i)) {
+ if (!in_whitespace) {
+ // Reduce all whitespace sequences to a single space.
+ in_whitespace = true;
+ result[chars_written++] = L' ';
+ }
+ if (trim_sequences_with_line_breaks && !already_trimmed &&
+ ((*i == '\n') || (*i == '\r'))) {
+ // Whitespace sequences containing CR or LF are eliminated entirely.
+ already_trimmed = true;
+ --chars_written;
+ }
+ } else {
+ // Non-whitespace chracters are copied straight across.
+ in_whitespace = false;
+ already_trimmed = false;
+ result[chars_written++] = *i;
+ }
+ }
+
+ if (in_whitespace && !already_trimmed) {
+ // Any trailing whitespace is eliminated.
+ --chars_written;
+ }
+
+ result.resize(chars_written);
+ return result;
+}
+
+string16 CollapseWhitespace(const string16& text,
+ bool trim_sequences_with_line_breaks) {
+ return CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
+}
+
+std::string CollapseWhitespaceASCII(const std::string& text,
+ bool trim_sequences_with_line_breaks) {
+ return CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
+}
+
+bool ContainsOnlyChars(StringPiece input, StringPiece characters) {
+ return input.find_first_not_of(characters) == StringPiece::npos;
+}
+
+bool ContainsOnlyChars(StringPiece16 input, StringPiece16 characters) {
+ return input.find_first_not_of(characters) == StringPiece16::npos;
+}
+
+template <class Char>
+inline bool DoIsStringASCII(const Char* characters, size_t length) {
+ if (!length)
+ return true;
+ constexpr MachineWord non_ascii_bit_mask = NonASCIIMask<Char>::value();
+ MachineWord all_char_bits = 0;
+ const Char* end = characters + length;
+
+ // Prologue: align the input.
+ while (!IsMachineWordAligned(characters) && characters < end)
+ all_char_bits |= *characters++;
+ if (all_char_bits & non_ascii_bit_mask)
+ return false;
+
+ // Compare the values of CPU word size.
+ constexpr size_t chars_per_word = sizeof(MachineWord) / sizeof(Char);
+ constexpr int batch_count = 16;
+ while (characters <= end - batch_count * chars_per_word) {
+ all_char_bits = 0;
+ for (int i = 0; i < batch_count; ++i) {
+ all_char_bits |= *(reinterpret_cast<const MachineWord*>(characters));
+ characters += chars_per_word;
+ }
+ if (all_char_bits & non_ascii_bit_mask)
+ return false;
+ }
+
+ // Process the remaining words.
+ all_char_bits = 0;
+ while (characters <= end - chars_per_word) {
+ all_char_bits |= *(reinterpret_cast<const MachineWord*>(characters));
+ characters += chars_per_word;
+ }
+
+ // Process the remaining bytes.
+ while (characters < end)
+ all_char_bits |= *characters++;
+
+ return !(all_char_bits & non_ascii_bit_mask);
+}
+
+bool IsStringASCII(StringPiece str) {
+ return DoIsStringASCII(str.data(), str.length());
+}
+
+bool IsStringASCII(StringPiece16 str) {
+ return DoIsStringASCII(str.data(), str.length());
+}
+
+#if defined(WCHAR_T_IS_UTF32)
+bool IsStringASCII(WStringPiece str) {
+ return DoIsStringASCII(str.data(), str.length());
+}
+#endif
+
+template <bool (*Validator)(uint32_t)>
+inline static bool DoIsStringUTF8(StringPiece str) {
+ const char* src = str.data();
+ int32_t src_len = static_cast<int32_t>(str.length());
+ int32_t char_index = 0;
+
+ while (char_index < src_len) {
+ int32_t code_point;
+ CBU8_NEXT(src, char_index, src_len, code_point);
+ if (!Validator(code_point))
+ return false;
+ }
+ return true;
+}
+
+bool IsStringUTF8(StringPiece str) {
+ return DoIsStringUTF8<IsValidCharacter>(str);
+}
+
+bool IsStringUTF8AllowingNoncharacters(StringPiece str) {
+ return DoIsStringUTF8<IsValidCodepoint>(str);
+}
+
+// Implementation note: Normally this function will be called with a hardcoded
+// constant for the lowercase_ascii parameter. Constructing a StringPiece from
+// a C constant requires running strlen, so the result will be two passes
+// through the buffers, one to find the length of lowercase_ascii, and one to
+// compare each letter.
+//
+// This function could have taken a const char* to avoid this and only do one
+// pass through the string. But the strlen is faster than the case-insensitive
+// compares and lets us early-exit in the case that the strings are different
+// lengths (will often be the case for non-matches). So whether one approach or
+// the other will be faster depends on the case.
+//
+// The hardcoded strings are typically very short so it doesn't matter, and the
+// string piece gives additional flexibility for the caller (doesn't have to be
+// null terminated) so we choose the StringPiece route.
+template<typename Str>
+static inline bool DoLowerCaseEqualsASCII(BasicStringPiece<Str> str,
+ StringPiece lowercase_ascii) {
+ if (str.size() != lowercase_ascii.size())
+ return false;
+ for (size_t i = 0; i < str.size(); i++) {
+ if (ToLowerASCII(str[i]) != lowercase_ascii[i])
+ return false;
+ }
+ return true;
+}
+
+bool LowerCaseEqualsASCII(StringPiece str, StringPiece lowercase_ascii) {
+ return DoLowerCaseEqualsASCII<std::string>(str, lowercase_ascii);
+}
+
+bool LowerCaseEqualsASCII(StringPiece16 str, StringPiece lowercase_ascii) {
+ return DoLowerCaseEqualsASCII<string16>(str, lowercase_ascii);
+}
+
+bool EqualsASCII(StringPiece16 str, StringPiece ascii) {
+ if (str.length() != ascii.length())
+ return false;
+ return std::equal(ascii.begin(), ascii.end(), str.begin());
+}
+
+// Shared implementation for the StartsWith() overloads below.
+template<typename Str>
+bool StartsWithT(BasicStringPiece<Str> str,
+                 BasicStringPiece<Str> search_for,
+                 CompareCase case_sensitivity) {
+  // A prefix longer than |str| itself can never match.
+  if (search_for.size() > str.size())
+    return false;
+
+  // View of the leading |search_for.size()| characters of |str|.
+  BasicStringPiece<Str> source = str.substr(0, search_for.size());
+
+  switch (case_sensitivity) {
+    case CompareCase::SENSITIVE:
+      return source == search_for;
+
+    case CompareCase::INSENSITIVE_ASCII:
+      return std::equal(
+          search_for.begin(), search_for.end(),
+          source.begin(),
+          CaseInsensitiveCompareASCII<typename Str::value_type>());
+
+    default:
+      NOTREACHED();
+      return false;
+  }
+}
+
+bool StartsWith(StringPiece str,
+                StringPiece search_for,
+                CompareCase case_sensitivity) {
+  return StartsWithT<std::string>(str, search_for, case_sensitivity);
+}
+
+bool StartsWith(StringPiece16 str,
+                StringPiece16 search_for,
+                CompareCase case_sensitivity) {
+  return StartsWithT<string16>(str, search_for, case_sensitivity);
+}
+
+// Shared implementation for the EndsWith() overloads below.
+template <typename Str>
+bool EndsWithT(BasicStringPiece<Str> str,
+               BasicStringPiece<Str> search_for,
+               CompareCase case_sensitivity) {
+  // A suffix longer than |str| itself can never match.
+  if (search_for.size() > str.size())
+    return false;
+
+  // View of the trailing |search_for.size()| characters of |str|.
+  BasicStringPiece<Str> source = str.substr(str.size() - search_for.size(),
+                                            search_for.size());
+
+  switch (case_sensitivity) {
+    case CompareCase::SENSITIVE:
+      return source == search_for;
+
+    case CompareCase::INSENSITIVE_ASCII:
+      return std::equal(
+          source.begin(), source.end(),
+          search_for.begin(),
+          CaseInsensitiveCompareASCII<typename Str::value_type>());
+
+    default:
+      NOTREACHED();
+      return false;
+  }
+}
+
+bool EndsWith(StringPiece str,
+              StringPiece search_for,
+              CompareCase case_sensitivity) {
+  return EndsWithT<std::string>(str, search_for, case_sensitivity);
+}
+
+bool EndsWith(StringPiece16 str,
+              StringPiece16 search_for,
+              CompareCase case_sensitivity) {
+  return EndsWithT<string16>(str, search_for, case_sensitivity);
+}
+
+// Maps a single hex digit to its numeric value: '0'-'9' -> 0-9,
+// 'a'/'A'-'f'/'F' -> 10-15. Input must already satisfy IsHexDigit();
+// anything else trips the DCHECK in debug builds and yields 0.
+char HexDigitToInt(wchar_t c) {
+  DCHECK(IsHexDigit(c));
+  if (c >= '0' && c <= '9')
+    return static_cast<char>(c - '0');
+  if (c >= 'a' && c <= 'f')
+    return static_cast<char>(c - 'a' + 10);
+  if (c >= 'A' && c <= 'F')
+    return static_cast<char>(c - 'A' + 10);
+  return 0;
+}
+
+// Returns true if |c| appears in kWhitespaceWide, the null-terminated list
+// of Unicode whitespace code points declared in the header.
+bool IsUnicodeWhitespace(wchar_t c) {
+  // kWhitespaceWide is a NULL-terminated string
+  for (const wchar_t* cur = kWhitespaceWide; *cur; ++cur) {
+    if (*cur == c)
+      return true;
+  }
+  return false;
+}
+
+// Unit suffixes for FormatBytesUnlocalized(), indexed by power of 1024.
+static const char* const kByteStringsUnlocalized[] = {
+  " B",
+  " kB",
+  " MB",
+  " GB",
+  " TB",
+  " PB"
+};
+
+string16 FormatBytesUnlocalized(int64_t bytes) {
+  double unit_amount = static_cast<double>(bytes);
+  size_t dimension = 0;
+  const int kKilo = 1024;
+  // Scale down by 1024 until the value fits the largest available suffix.
+  while (unit_amount >= kKilo &&
+         dimension < base::size(kByteStringsUnlocalized) - 1) {
+    unit_amount /= kKilo;
+    dimension++;
+  }
+
+  char buf[64];
+  // Show one decimal place for small scaled values (e.g. "1.5 MB"); round to
+  // a whole number otherwise (and for exact zero / unscaled byte counts).
+  if (bytes != 0 && dimension > 0 && unit_amount < 100) {
+    base::snprintf(buf, base::size(buf), "%.1lf%s", unit_amount,
+                   kByteStringsUnlocalized[dimension]);
+  } else {
+    base::snprintf(buf, base::size(buf), "%.0lf%s", unit_amount,
+                   kByteStringsUnlocalized[dimension]);
+  }
+
+  return ASCIIToUTF16(buf);
+}
+
+// A Matcher for DoReplaceMatchesAfterOffset() that matches substrings.
+template <class StringType>
+struct SubstringMatcher {
+  BasicStringPiece<StringType> find_this;
+
+  // Returns the position of the next match at or after |pos|, or npos.
+  size_t Find(const StringType& input, size_t pos) {
+    return input.find(find_this.data(), pos, find_this.length());
+  }
+  // Number of characters consumed by a single match.
+  size_t MatchSize() { return find_this.length(); }
+};
+
+// A Matcher for DoReplaceMatchesAfterOffset() that matches single characters.
+template <class StringType>
+struct CharacterMatcher {
+  BasicStringPiece<StringType> find_any_of_these;
+
+  // Returns the position of the next character from |find_any_of_these| at
+  // or after |pos|, or npos.
+  size_t Find(const StringType& input, size_t pos) {
+    return input.find_first_of(find_any_of_these.data(), pos,
+                               find_any_of_these.length());
+  }
+  // A character match always consumes exactly one character.
+  constexpr size_t MatchSize() { return 1; }
+};
+
+// Whether DoReplaceMatchesAfterOffset() replaces every match or only the
+// first one.
+enum class ReplaceType { REPLACE_ALL, REPLACE_FIRST };
+
+// Runs in O(n) time in the length of |str|, and transforms the string without
+// reallocating when possible. Returns |true| if any matches were found.
+//
+// This is parameterized on a |Matcher| traits type, so that it can be the
+// implementation for both ReplaceChars() and ReplaceSubstringsAfterOffset().
+template <class StringType, class Matcher>
+bool DoReplaceMatchesAfterOffset(StringType* str,
+                                 size_t initial_offset,
+                                 Matcher matcher,
+                                 BasicStringPiece<StringType> replace_with,
+                                 ReplaceType replace_type) {
+  using CharTraits = typename StringType::traits_type;
+
+  // An empty pattern would match forever at the same position; bail out.
+  const size_t find_length = matcher.MatchSize();
+  if (!find_length)
+    return false;
+
+  // If the find string doesn't appear, there's nothing to do.
+  size_t first_match = matcher.Find(*str, initial_offset);
+  if (first_match == StringType::npos)
+    return false;
+
+  // If we're only replacing one instance, there's no need to do anything
+  // complicated.
+  const size_t replace_length = replace_with.length();
+  if (replace_type == ReplaceType::REPLACE_FIRST) {
+    str->replace(first_match, find_length, replace_with.data(), replace_length);
+    return true;
+  }
+
+  // If the find and replace strings are the same length, we can simply use
+  // replace() on each instance, and finish the entire operation in O(n) time.
+  if (find_length == replace_length) {
+    auto* buffer = &((*str)[0]);
+    for (size_t offset = first_match; offset != StringType::npos;
+         offset = matcher.Find(*str, offset + replace_length)) {
+      // Same-length replacement: overwrite the match in place.
+      CharTraits::copy(buffer + offset, replace_with.data(), replace_length);
+    }
+    return true;
+  }
+
+  // Since the find and replace strings aren't the same length, a loop like the
+  // one above would be O(n^2) in the worst case, as replace() will shift the
+  // entire remaining string each time. We need to be more clever to keep things
+  // O(n).
+  //
+  // When the string is being shortened, it's possible to just shift the matches
+  // down in one pass while finding, and truncate the length at the end of the
+  // search.
+  //
+  // If the string is being lengthened, more work is required. The strategy used
+  // here is to make two find() passes through the string. The first pass counts
+  // the number of matches to determine the new size. The second pass will
+  // either construct the new string into a new buffer (if the existing buffer
+  // lacked capacity), or else -- if there is room -- create a region of scratch
+  // space after |first_match| by shifting the tail of the string to a higher
+  // index, and doing in-place moves from the tail to lower indices thereafter.
+  size_t str_length = str->length();
+  size_t expansion = 0;
+  if (replace_length > find_length) {
+    // This operation lengthens the string; determine the new length by counting
+    // matches.
+    const size_t expansion_per_match = (replace_length - find_length);
+    size_t num_matches = 0;
+    for (size_t match = first_match; match != StringType::npos;
+         match = matcher.Find(*str, match + find_length)) {
+      expansion += expansion_per_match;
+      ++num_matches;
+    }
+    const size_t final_length = str_length + expansion;
+
+    if (str->capacity() < final_length) {
+      // If we'd have to allocate a new buffer to grow the string, build the
+      // result directly into the new allocation via append().
+      StringType src(str->get_allocator());
+      str->swap(src);
+      str->reserve(final_length);
+
+      size_t pos = 0;
+      for (size_t match = first_match;; match = matcher.Find(src, pos)) {
+        str->append(src, pos, match - pos);
+        str->append(replace_with.data(), replace_length);
+        pos = match + find_length;
+
+        // A mid-loop test/break enables skipping the final Find() call; the
+        // number of matches is known, so don't search past the last one.
+        if (!--num_matches)
+          break;
+      }
+
+      // Handle substring after the final match.
+      str->append(src, pos, str_length - pos);
+      return true;
+    }
+
+    // Prepare for the copy/move loop below -- expand the string to its final
+    // size by shifting the data after the first match to the end of the resized
+    // string.
+    size_t shift_src = first_match + find_length;
+    size_t shift_dst = shift_src + expansion;
+
+    // Big |expansion| factors (relative to |str_length|) require padding up to
+    // |shift_dst|.
+    if (shift_dst > str_length)
+      str->resize(shift_dst);
+
+    str->replace(shift_dst, str_length - shift_src, *str, shift_src,
+                 str_length - shift_src);
+    str_length = final_length;
+  }
+
+  // We can alternate replacement and move operations. This won't overwrite the
+  // unsearched region of the string so long as |write_offset| <= |read_offset|;
+  // that condition is always satisfied because:
+  //
+  //   (a) If the string is being shortened, |expansion| is zero and
+  //       |write_offset| grows slower than |read_offset|.
+  //
+  //   (b) If the string is being lengthened, |write_offset| grows faster than
+  //       |read_offset|, but |expansion| is big enough so that |write_offset|
+  //       will only catch up to |read_offset| at the point of the last match.
+  auto* buffer = &((*str)[0]);
+  size_t write_offset = first_match;
+  size_t read_offset = first_match + expansion;
+  do {
+    if (replace_length) {
+      CharTraits::copy(buffer + write_offset, replace_with.data(),
+                       replace_length);
+      write_offset += replace_length;
+    }
+    read_offset += find_length;
+
+    // min() clamps StringType::npos (the largest unsigned value) to str_length.
+    size_t match = std::min(matcher.Find(*str, read_offset), str_length);
+
+    size_t length = match - read_offset;
+    if (length) {
+      CharTraits::move(buffer + write_offset, buffer + read_offset, length);
+      write_offset += length;
+      read_offset += length;
+    }
+  } while (read_offset < str_length);
+
+  // If we're shortening the string, truncate it now.
+  str->resize(write_offset);
+  return true;
+}
+
+// Shared implementation of the ReplaceChars() overloads declared in the
+// header: copies |input| into |output|, then replaces each character found in
+// |find_any_of_these| with |replace_with|. Returns true if anything changed.
+template <class StringType>
+bool ReplaceCharsT(const StringType& input,
+                   BasicStringPiece<StringType> find_any_of_these,
+                   BasicStringPiece<StringType> replace_with,
+                   StringType* output) {
+  // Commonly, this is called with output and input being the same string; in
+  // that case, this assignment is inexpensive.
+  *output = input;
+
+  return DoReplaceMatchesAfterOffset(
+      output, 0, CharacterMatcher<StringType>{find_any_of_these}, replace_with,
+      ReplaceType::REPLACE_ALL);
+}
+
+// Replaces only the first occurrence of |find_this| at/after |start_offset|.
+void ReplaceFirstSubstringAfterOffset(string16* str,
+                                      size_t start_offset,
+                                      StringPiece16 find_this,
+                                      StringPiece16 replace_with) {
+  DoReplaceMatchesAfterOffset(str, start_offset,
+                              SubstringMatcher<string16>{find_this},
+                              replace_with, ReplaceType::REPLACE_FIRST);
+}
+
+void ReplaceFirstSubstringAfterOffset(std::string* str,
+                                      size_t start_offset,
+                                      StringPiece find_this,
+                                      StringPiece replace_with) {
+  DoReplaceMatchesAfterOffset(str, start_offset,
+                              SubstringMatcher<std::string>{find_this},
+                              replace_with, ReplaceType::REPLACE_FIRST);
+}
+
+// Replaces every occurrence of |find_this| at/after |start_offset|.
+void ReplaceSubstringsAfterOffset(string16* str,
+                                  size_t start_offset,
+                                  StringPiece16 find_this,
+                                  StringPiece16 replace_with) {
+  DoReplaceMatchesAfterOffset(str, start_offset,
+                              SubstringMatcher<string16>{find_this},
+                              replace_with, ReplaceType::REPLACE_ALL);
+}
+
+void ReplaceSubstringsAfterOffset(std::string* str,
+                                  size_t start_offset,
+                                  StringPiece find_this,
+                                  StringPiece replace_with) {
+  DoReplaceMatchesAfterOffset(str, start_offset,
+                              SubstringMatcher<std::string>{find_this},
+                              replace_with, ReplaceType::REPLACE_ALL);
+}
+
+// Shared implementation of the WriteInto() overloads below: makes room for
+// |length_with_null| characters (including the trailing NUL) and returns a
+// mutable pointer to the string's buffer.
+template <class string_type>
+inline typename string_type::value_type* WriteIntoT(string_type* str,
+                                                    size_t length_with_null) {
+  DCHECK_GE(length_with_null, 1u);
+  str->reserve(length_with_null);
+  // The string's logical size excludes the terminating NUL.
+  str->resize(length_with_null - 1);
+  return &((*str)[0]);
+}
+
+char* WriteInto(std::string* str, size_t length_with_null) {
+  return WriteIntoT(str, length_with_null);
+}
+
+char16* WriteInto(string16* str, size_t length_with_null) {
+  return WriteIntoT(str, length_with_null);
+}
+
+#if defined(_MSC_VER) && !defined(__clang__)
+// Work around VC++ code-gen bug. https://crbug.com/804884
+#pragma optimize("", off)
+#endif
+
+// Generic version for all JoinString overloads. |list_type| must be a sequence
+// (std::vector or std::initializer_list) of strings/StringPieces (std::string,
+// string16, StringPiece or StringPiece16). |string_type| is either std::string
+// or string16.
+template <typename list_type, typename string_type>
+static string_type JoinStringT(const list_type& parts,
+                               BasicStringPiece<string_type> sep) {
+  // Joining zero parts yields the empty string.
+  if (parts.size() == 0)
+    return string_type();
+
+  // Pre-allocate the eventual size of the string. Start with the size of all of
+  // the separators (note that this *assumes* parts.size() > 0).
+  size_t total_size = (parts.size() - 1) * sep.size();
+  for (const auto& part : parts)
+    total_size += part.size();
+  string_type result;
+  result.reserve(total_size);
+
+  // Append the first part without a leading separator.
+  auto iter = parts.begin();
+  DCHECK(iter != parts.end());
+  AppendToString(&result, *iter);
+  ++iter;
+
+  for (; iter != parts.end(); ++iter) {
+    sep.AppendToString(&result);
+    // Using the overloaded AppendToString allows this template function to work
+    // on both strings and StringPieces without creating an intermediate
+    // StringPiece object.
+    AppendToString(&result, *iter);
+  }
+
+  // Sanity-check that we pre-allocated correctly.
+  DCHECK_EQ(total_size, result.size());
+
+  return result;
+}
+
+// Thin public wrappers around JoinStringT() for each supported element type.
+std::string JoinString(const std::vector<std::string>& parts,
+                       StringPiece separator) {
+  return JoinStringT(parts, separator);
+}
+
+string16 JoinString(const std::vector<string16>& parts,
+                    StringPiece16 separator) {
+  return JoinStringT(parts, separator);
+}
+
+#if defined(_MSC_VER) && !defined(__clang__)
+// Work around VC++ code-gen bug. https://crbug.com/804884
+#pragma optimize("", on)
+#endif
+
+std::string JoinString(const std::vector<StringPiece>& parts,
+                       StringPiece separator) {
+  return JoinStringT(parts, separator);
+}
+
+string16 JoinString(const std::vector<StringPiece16>& parts,
+                    StringPiece16 separator) {
+  return JoinStringT(parts, separator);
+}
+
+std::string JoinString(std::initializer_list<StringPiece> parts,
+                       StringPiece separator) {
+  return JoinStringT(parts, separator);
+}
+
+string16 JoinString(std::initializer_list<StringPiece16> parts,
+                    StringPiece16 separator) {
+  return JoinStringT(parts, separator);
+}
+
+#if defined(OS_WIN) && defined(BASE_STRING16_IS_STD_U16STRING)
+
+// std::wstring overloads, compiled only when string16 is std::u16string so
+// wide strings need their own entry points. Each forwards to the shared
+// TrimStringT()/TrimStringPieceT()/WriteIntoT() template implementations.
+TrimPositions TrimWhitespace(WStringPiece input,
+                             TrimPositions positions,
+                             std::wstring* output) {
+  return TrimStringT(input, WStringPiece(kWhitespaceWide), positions, output);
+}
+
+WStringPiece TrimWhitespace(WStringPiece input, TrimPositions positions) {
+  return TrimStringPieceT(input, WStringPiece(kWhitespaceWide), positions);
+}
+
+bool TrimString(WStringPiece input,
+                WStringPiece trim_chars,
+                std::wstring* output) {
+  // Reports whether anything was trimmed from either end.
+  return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
+}
+
+WStringPiece TrimString(WStringPiece input,
+                        WStringPiece trim_chars,
+                        TrimPositions positions) {
+  return TrimStringPieceT(input, trim_chars, positions);
+}
+
+wchar_t* WriteInto(std::wstring* str, size_t length_with_null) {
+  return WriteIntoT(str, length_with_null);
+}
+
+#endif
+
+// The following code is compatible with the OpenBSD lcpy interface. See:
+//   http://www.gratisoft.us/todd/papers/strlcpy.html
+//   ftp://ftp.openbsd.org/pub/OpenBSD/src/lib/libc/string/{wcs,str}lcpy.c
+
+namespace {
+
+// Shared implementation of strlcpy()/wcslcpy(): copies up to |dst_size| - 1
+// characters of |src| into |dst|, NUL-terminating whenever |dst_size| is
+// nonzero, and returns the full length of |src| in characters.
+template <typename CHAR>
+size_t lcpyT(CHAR* dst, const CHAR* src, size_t dst_size) {
+  for (size_t i = 0; i < dst_size; ++i) {
+    if ((dst[i] = src[i]) == 0)  // We hit and copied the terminating NULL.
+      return i;
+  }
+
+  // We were left off at dst_size. We over copied 1 byte. Null terminate.
+  if (dst_size != 0)
+    dst[dst_size - 1] = 0;
+
+  // Count the rest of the |src|, and return its length in characters.
+  while (src[dst_size]) ++dst_size;
+  return dst_size;
+}
+
+}  // namespace
+
+size_t strlcpy(char* dst, const char* src, size_t dst_size) {
+  return lcpyT<char>(dst, src, dst_size);
+}
+size_t wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size) {
+  return lcpyT<wchar_t>(dst, src, dst_size);
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/strings/string_util.h b/security/sandbox/chromium/base/strings/string_util.h
new file mode 100644
index 0000000000..f6e96c13f5
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_util.h
@@ -0,0 +1,549 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file defines utility functions for working with strings.
+
+#ifndef BASE_STRINGS_STRING_UTIL_H_
+#define BASE_STRINGS_STRING_UTIL_H_
+
+#include <ctype.h>
+#include <stdarg.h> // va_list
+#include <stddef.h>
+#include <stdint.h>
+
+#include <initializer_list>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/stl_util.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h" // For implicit conversions.
+#include "build/build_config.h"
+
+namespace base {
+
+// C standard-library functions that aren't cross-platform are provided as
+// "base::...", and their prototypes are listed below. These functions are
+// then implemented as inline calls to the platform-specific equivalents in the
+// platform-specific headers.
+
+// Wrapper for vsnprintf that always null-terminates and always returns the
+// number of characters that would be in an untruncated formatted
+// string, even when truncation occurs.
+int vsnprintf(char* buffer, size_t size, const char* format, va_list arguments)
+ PRINTF_FORMAT(3, 0);
+
+// Some of these implementations need to be inlined.
+
+// We separate the declaration from the implementation of this inline
+// function just so the PRINTF_FORMAT works.
+inline int snprintf(char* buffer, size_t size, const char* format, ...)
+    PRINTF_FORMAT(3, 4);
+inline int snprintf(char* buffer, size_t size, const char* format, ...) {
+  // Forwards to base::vsnprintf(), which always NUL-terminates and returns
+  // the untruncated length (see the comment on its declaration above).
+  va_list arguments;
+  va_start(arguments, format);
+  int result = vsnprintf(buffer, size, format, arguments);
+  va_end(arguments);
+  return result;
+}
+
+// BSD-style safe and consistent string copy functions.
+// Copies |src| to |dst|, where |dst_size| is the total allocated size of |dst|.
+// Copies at most |dst_size|-1 characters, and always NULL terminates |dst|, as
+// long as |dst_size| is not 0. Returns the length of |src| in characters.
+// If the return value is >= dst_size, then the output was truncated.
+// NOTE: All sizes are in number of characters, NOT in bytes.
+BASE_EXPORT size_t strlcpy(char* dst, const char* src, size_t dst_size);
+BASE_EXPORT size_t wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size);
+
+// Scan a wprintf format string to determine whether it's portable across a
+// variety of systems. This function only checks that the conversion
+// specifiers used by the format string are supported and have the same meaning
+// on a variety of systems. It doesn't check for other errors that might occur
+// within a format string.
+//
+// Nonportable conversion specifiers for wprintf are:
+// - 's' and 'c' without an 'l' length modifier. %s and %c operate on char
+// data on all systems except Windows, which treat them as wchar_t data.
+// Use %ls and %lc for wchar_t data instead.
+// - 'S' and 'C', which operate on wchar_t data on all systems except Windows,
+// which treat them as char data. Use %ls and %lc for wchar_t data
+// instead.
+// - 'F', which is not identified by Windows wprintf documentation.
+// - 'D', 'O', and 'U', which are deprecated and not available on all systems.
+// Use %ld, %lo, and %lu instead.
+//
+// Note that there is no portable conversion specifier for char data when
+// working with wprintf.
+//
+// This function is intended to be called from base::vswprintf.
+BASE_EXPORT bool IsWprintfFormatPortable(const wchar_t* format);
+
+// ASCII-specific tolower: maps 'A'-'Z' to 'a'-'z' and leaves every other
+// value untouched. Unlike the C library's tolower(), this is independent of
+// the current locale.
+inline char ToLowerASCII(char c) {
+  if (c < 'A' || c > 'Z')
+    return c;
+  return c + ('a' - 'A');
+}
+inline char16 ToLowerASCII(char16 c) {
+  if (c < 'A' || c > 'Z')
+    return c;
+  return c + ('a' - 'A');
+}
+
+// ASCII-specific toupper: maps 'a'-'z' to 'A'-'Z' and leaves every other
+// value untouched. Unlike the C library's toupper(), this is independent of
+// the current locale.
+inline char ToUpperASCII(char c) {
+  if (c < 'a' || c > 'z')
+    return c;
+  return c + ('A' - 'a');
+}
+inline char16 ToUpperASCII(char16 c) {
+  if (c < 'a' || c > 'z')
+    return c;
+  return c + ('A' - 'a');
+}
+
+// Converts the given string to its ASCII-lowercase equivalent.
+BASE_EXPORT std::string ToLowerASCII(StringPiece str);
+BASE_EXPORT string16 ToLowerASCII(StringPiece16 str);
+
+// Converts the given string to its ASCII-uppercase equivalent.
+BASE_EXPORT std::string ToUpperASCII(StringPiece str);
+BASE_EXPORT string16 ToUpperASCII(StringPiece16 str);
+
+// Functor for case-insensitive ASCII comparisons for STL algorithms like
+// std::search.
+//
+// Note that a full Unicode version of this functor is not possible to write
+// because case mappings might change the number of characters, depend on
+// context (combining accents), and require handling UTF-16. If you need
+// proper Unicode support, use base::i18n::ToLower/FoldCase and then just
+// use a normal operator== on the result.
+template<typename Char> struct CaseInsensitiveCompareASCII {
+ public:
+  // Returns true if |x| and |y| are equal after ASCII case folding.
+  bool operator()(Char x, Char y) const {
+    return ToLowerASCII(x) == ToLowerASCII(y);
+  }
+};
+
+// Like strcasecmp for case-insensitive ASCII characters only. Returns:
+//   -1  (a < b)
+//    0  (a == b)
+//    1  (a > b)
+// (unlike strcasecmp which can return values greater or less than 1/-1). For
+// full Unicode support, use base::i18n::ToLower or base::i18n::FoldCase
+// and then just call the normal string operators on the result.
+BASE_EXPORT int CompareCaseInsensitiveASCII(StringPiece a, StringPiece b);
+BASE_EXPORT int CompareCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b);
+
+// Equality for ASCII case-insensitive comparisons. For full Unicode support,
+// use base::i18n::ToLower or base::i18n::FoldCase and then compare with either
+// == or !=.
+BASE_EXPORT bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b);
+BASE_EXPORT bool EqualsCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b);
+
+// These threadsafe functions return references to globally unique empty
+// strings.
+//
+// It is likely faster to construct a new empty string object (just a few
+// instructions to set the length to 0) than to get the empty string instance
+// returned by these functions (which requires threadsafe static access).
+//
+// Therefore, DO NOT USE THESE AS A GENERAL-PURPOSE SUBSTITUTE FOR DEFAULT
+// CONSTRUCTORS. There is only one case where you should use these: functions
+// which need to return a string by reference (e.g. as a class member
+// accessor), and don't have an empty string to use (e.g. in an error case).
+// These should not be used as initializers, function arguments, or return
+// values for functions which return by value or outparam.
+BASE_EXPORT const std::string& EmptyString();
+BASE_EXPORT const string16& EmptyString16();
+
+// Contains the set of characters representing whitespace in the corresponding
+// encoding. Null-terminated. The ASCII versions are the whitespaces as defined
+// by HTML5, and don't include control characters.
+BASE_EXPORT extern const wchar_t kWhitespaceWide[]; // Includes Unicode.
+BASE_EXPORT extern const char16 kWhitespaceUTF16[]; // Includes Unicode.
+BASE_EXPORT extern const char16 kWhitespaceNoCrLfUTF16[]; // Unicode w/o CR/LF.
+BASE_EXPORT extern const char kWhitespaceASCII[];
+BASE_EXPORT extern const char16 kWhitespaceASCIIAs16[]; // No unicode.
+
+// Null-terminated string representing the UTF-8 byte order mark.
+BASE_EXPORT extern const char kUtf8ByteOrderMark[];
+
+// Removes characters in |remove_chars| from anywhere in |input|. Returns true
+// if any characters were removed. |remove_chars| must be null-terminated.
+// NOTE: Safe to use the same variable for both |input| and |output|.
+BASE_EXPORT bool RemoveChars(const string16& input,
+ StringPiece16 remove_chars,
+ string16* output);
+BASE_EXPORT bool RemoveChars(const std::string& input,
+ StringPiece remove_chars,
+ std::string* output);
+
+// Replaces characters in |replace_chars| from anywhere in |input| with
+// |replace_with|. Each character in |replace_chars| will be replaced with
+// the |replace_with| string. Returns true if any characters were replaced.
+// |replace_chars| must be null-terminated.
+// NOTE: Safe to use the same variable for both |input| and |output|.
+BASE_EXPORT bool ReplaceChars(const string16& input,
+ StringPiece16 replace_chars,
+ StringPiece16 replace_with,
+ string16* output);
+BASE_EXPORT bool ReplaceChars(const std::string& input,
+ StringPiece replace_chars,
+ StringPiece replace_with,
+ std::string* output);
+
+enum TrimPositions {
+ TRIM_NONE = 0,
+ TRIM_LEADING = 1 << 0,
+ TRIM_TRAILING = 1 << 1,
+ TRIM_ALL = TRIM_LEADING | TRIM_TRAILING,
+};
+
+// Removes characters in |trim_chars| from the beginning and end of |input|.
+// The 8-bit version only works on 8-bit characters, not UTF-8. Returns true if
+// any characters were removed.
+//
+// It is safe to use the same variable for both |input| and |output| (this is
+// the normal usage to trim in-place).
+BASE_EXPORT bool TrimString(StringPiece16 input,
+ StringPiece16 trim_chars,
+ string16* output);
+BASE_EXPORT bool TrimString(StringPiece input,
+ StringPiece trim_chars,
+ std::string* output);
+
+// StringPiece versions of the above. The returned pieces refer to the original
+// buffer.
+BASE_EXPORT StringPiece16 TrimString(StringPiece16 input,
+ StringPiece16 trim_chars,
+ TrimPositions positions);
+BASE_EXPORT StringPiece TrimString(StringPiece input,
+ StringPiece trim_chars,
+ TrimPositions positions);
+
+// Truncates a string to the nearest UTF-8 character that will leave
+// the string less than or equal to the specified byte size.
+BASE_EXPORT void TruncateUTF8ToByteSize(const std::string& input,
+ const size_t byte_size,
+ std::string* output);
+
+#if defined(WCHAR_T_IS_UTF16)
+// Utility functions to access the underlying string buffer as a wide char
+// pointer.
+//
+// Note: These functions violate strict aliasing when char16 and wchar_t are
+// unrelated types. We thus pass -fno-strict-aliasing to the compiler on
+// non-Windows platforms [1], and rely on it being off in Clang's CL mode [2].
+//
+// [1] https://crrev.com/b9a0976622/build/config/compiler/BUILD.gn#244
+// [2]
+// https://github.com/llvm/llvm-project/blob/1e28a66/clang/lib/Driver/ToolChains/Clang.cpp#L3949
+inline wchar_t* as_writable_wcstr(char16* str) {
+  return reinterpret_cast<wchar_t*>(str);
+}
+
+inline wchar_t* as_writable_wcstr(string16& str) {
+  return reinterpret_cast<wchar_t*>(data(str));
+}
+
+inline const wchar_t* as_wcstr(const char16* str) {
+  return reinterpret_cast<const wchar_t*>(str);
+}
+
+inline const wchar_t* as_wcstr(StringPiece16 str) {
+  return reinterpret_cast<const wchar_t*>(str.data());
+}
+
+// Utility functions to access the underlying string buffer as a char16 pointer.
+inline char16* as_writable_u16cstr(wchar_t* str) {
+  return reinterpret_cast<char16*>(str);
+}
+
+inline char16* as_writable_u16cstr(std::wstring& str) {
+  return reinterpret_cast<char16*>(data(str));
+}
+
+inline const char16* as_u16cstr(const wchar_t* str) {
+  return reinterpret_cast<const char16*>(str);
+}
+
+inline const char16* as_u16cstr(WStringPiece str) {
+  return reinterpret_cast<const char16*>(str.data());
+}
+
+// Utility functions to convert between base::WStringPiece and
+// base::StringPiece16. These are views: the result aliases the input buffer.
+inline WStringPiece AsWStringPiece(StringPiece16 str) {
+  return WStringPiece(as_wcstr(str.data()), str.size());
+}
+
+inline StringPiece16 AsStringPiece16(WStringPiece str) {
+  return StringPiece16(as_u16cstr(str.data()), str.size());
+}
+
+// Copying conversions between std::wstring and string16.
+inline std::wstring AsWString(StringPiece16 str) {
+  return std::wstring(as_wcstr(str.data()), str.size());
+}
+
+inline string16 AsString16(WStringPiece str) {
+  return string16(as_u16cstr(str.data()), str.size());
+}
+#endif  // defined(WCHAR_T_IS_UTF16)
+
+// Trims any whitespace from either end of the input string.
+//
+// The StringPiece versions return a substring referencing the input buffer.
+// The ASCII versions look only for ASCII whitespace.
+//
+// The std::string versions return where whitespace was found.
+// NOTE: Safe to use the same variable for both input and output.
+BASE_EXPORT TrimPositions TrimWhitespace(StringPiece16 input,
+ TrimPositions positions,
+ string16* output);
+BASE_EXPORT StringPiece16 TrimWhitespace(StringPiece16 input,
+ TrimPositions positions);
+BASE_EXPORT TrimPositions TrimWhitespaceASCII(StringPiece input,
+ TrimPositions positions,
+ std::string* output);
+BASE_EXPORT StringPiece TrimWhitespaceASCII(StringPiece input,
+ TrimPositions positions);
+
+// Searches for CR or LF characters. Removes all contiguous whitespace
+// strings that contain them. This is useful when trying to deal with text
+// copied from terminals.
+// Returns |text|, with the following three transformations:
+// (1) Leading and trailing whitespace is trimmed.
+// (2) If |trim_sequences_with_line_breaks| is true, any other whitespace
+// sequences containing a CR or LF are trimmed.
+// (3) All other whitespace sequences are converted to single spaces.
+BASE_EXPORT string16 CollapseWhitespace(
+ const string16& text,
+ bool trim_sequences_with_line_breaks);
+BASE_EXPORT std::string CollapseWhitespaceASCII(
+ const std::string& text,
+ bool trim_sequences_with_line_breaks);
+
+// Returns true if |input| is empty or contains only characters found in
+// |characters|.
+BASE_EXPORT bool ContainsOnlyChars(StringPiece input, StringPiece characters);
+BASE_EXPORT bool ContainsOnlyChars(StringPiece16 input,
+ StringPiece16 characters);
+
+// Returns true if |str| is structurally valid UTF-8 and also doesn't
+// contain any non-character code point (e.g. U+10FFFE). Prohibiting
+// non-characters increases the likelihood of detecting non-UTF-8 in
+// real-world text, for callers which do not need to accept
+// non-characters in strings.
+BASE_EXPORT bool IsStringUTF8(StringPiece str);
+
+// Returns true if |str| contains valid UTF-8, allowing non-character
+// code points.
+BASE_EXPORT bool IsStringUTF8AllowingNoncharacters(StringPiece str);
+
+// Returns true if |str| contains only valid ASCII character values.
+// Note 1: IsStringASCII executes in time determined solely by the
+// length of the string, not by its contents, so it is robust against
+// timing attacks for all strings of equal length.
+// Note 2: IsStringASCII assumes the input is likely all ASCII, and
+// does not leave early if it is not the case.
+BASE_EXPORT bool IsStringASCII(StringPiece str);
+BASE_EXPORT bool IsStringASCII(StringPiece16 str);
+#if defined(WCHAR_T_IS_UTF32)
+BASE_EXPORT bool IsStringASCII(WStringPiece str);
+#endif
+
+// Compare the lower-case form of the given string against the given
+// previously-lower-cased ASCII string (typically a constant).
+// (Fixed: parameter name was misspelled "lowecase_ascii", inconsistent with
+// the definitions in string_util.cc; declaration-only names are non-binding.)
+BASE_EXPORT bool LowerCaseEqualsASCII(StringPiece str,
+                                      StringPiece lowercase_ascii);
+BASE_EXPORT bool LowerCaseEqualsASCII(StringPiece16 str,
+                                      StringPiece lowercase_ascii);
+
+// Performs a case-sensitive string compare of the given 16-bit string against
+// the given 8-bit ASCII string (typically a constant). The behavior is
+// undefined if the |ascii| string is not ASCII.
+BASE_EXPORT bool EqualsASCII(StringPiece16 str, StringPiece ascii);
+
+// Indicates case sensitivity of comparisons. Only ASCII case insensitivity
+// is supported. Full Unicode case-insensitive conversions would need to go in
+// base/i18n so it can use ICU.
+//
+// If you need to do Unicode-aware case-insensitive StartsWith/EndsWith, it's
+// best to call base::i18n::ToLower() or base::i18n::FoldCase() (see
+// base/i18n/case_conversion.h for usage advice) on the arguments, and then use
+// the results to a case-sensitive comparison.
+enum class CompareCase {
+ SENSITIVE,
+ INSENSITIVE_ASCII,
+};
+
+BASE_EXPORT bool StartsWith(StringPiece str,
+ StringPiece search_for,
+ CompareCase case_sensitivity);
+BASE_EXPORT bool StartsWith(StringPiece16 str,
+ StringPiece16 search_for,
+ CompareCase case_sensitivity);
+BASE_EXPORT bool EndsWith(StringPiece str,
+ StringPiece search_for,
+ CompareCase case_sensitivity);
+BASE_EXPORT bool EndsWith(StringPiece16 str,
+ StringPiece16 search_for,
+ CompareCase case_sensitivity);
+
+// Determines the type of ASCII character, independent of locale (the C
+// library versions will change based on locale).
+template <typename Char>
+inline bool IsAsciiWhitespace(Char c) {
+  // Space plus the ASCII control-whitespace set: CR, LF, TAB, FF.
+  return c == ' ' || c == '\r' || c == '\n' || c == '\t' || c == '\f';
+}
+template <typename Char>
+inline bool IsAsciiAlpha(Char c) {
+  return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
+}
+template <typename Char>
+inline bool IsAsciiUpper(Char c) {
+  return c >= 'A' && c <= 'Z';
+}
+template <typename Char>
+inline bool IsAsciiLower(Char c) {
+  return c >= 'a' && c <= 'z';
+}
+template <typename Char>
+inline bool IsAsciiDigit(Char c) {
+  return c >= '0' && c <= '9';
+}
+template <typename Char>
+inline bool IsAsciiPrintable(Char c) {
+  // Printable ASCII runs from space (0x20) through tilde (0x7E).
+  return c >= ' ' && c <= '~';
+}
+
+// Returns true if |c| is an upper- or lower-case hexadecimal digit.
+template <typename Char>
+inline bool IsHexDigit(Char c) {
+  return (c >= '0' && c <= '9') ||
+         (c >= 'A' && c <= 'F') ||
+         (c >= 'a' && c <= 'f');
+}
+
+// Returns the integer corresponding to the given hex character. For example:
+// '4' -> 4
+// 'a' -> 10
+// 'B' -> 11
+// Assumes the input is a valid hex character. DCHECKs in debug builds if not.
+BASE_EXPORT char HexDigitToInt(wchar_t c);
+
+// Returns true if it's a Unicode whitespace character.
+BASE_EXPORT bool IsUnicodeWhitespace(wchar_t c);
+
+// Return a byte string in human-readable format with a unit suffix. Not
+// appropriate for use in any UI; use of FormatBytes and friends in ui/base is
+// highly recommended instead. TODO(avi): Figure out how to get callers to use
+// FormatBytes instead; remove this.
+BASE_EXPORT string16 FormatBytesUnlocalized(int64_t bytes);
+
+// Starting at |start_offset| (usually 0), replace the first instance of
+// |find_this| with |replace_with|.
+BASE_EXPORT void ReplaceFirstSubstringAfterOffset(
+ base::string16* str,
+ size_t start_offset,
+ StringPiece16 find_this,
+ StringPiece16 replace_with);
+BASE_EXPORT void ReplaceFirstSubstringAfterOffset(
+ std::string* str,
+ size_t start_offset,
+ StringPiece find_this,
+ StringPiece replace_with);
+
+// Starting at |start_offset| (usually 0), look through |str| and replace all
+// instances of |find_this| with |replace_with|.
+//
+// This does entire substrings; use std::replace in <algorithm> for single
+// characters, for example:
+// std::replace(str.begin(), str.end(), 'a', 'b');
+BASE_EXPORT void ReplaceSubstringsAfterOffset(
+ string16* str,
+ size_t start_offset,
+ StringPiece16 find_this,
+ StringPiece16 replace_with);
+BASE_EXPORT void ReplaceSubstringsAfterOffset(
+ std::string* str,
+ size_t start_offset,
+ StringPiece find_this,
+ StringPiece replace_with);
+
+// Reserves enough memory in |str| to accommodate |length_with_null| characters,
+// sets the size of |str| to |length_with_null - 1| characters, and returns a
+// pointer to the underlying contiguous array of characters. This is typically
+// used when calling a function that writes results into a character array, but
+// the caller wants the data to be managed by a string-like object. It is
+// convenient in that it can be used inline in the call, and fast in that it
+// avoids copying the results of the call from a char* into a string.
+//
+// Internally, this takes linear time because the resize() call 0-fills the
+// underlying array for potentially all
+// (|length_with_null - 1| * sizeof(string_type::value_type)) bytes. Ideally we
+// could avoid this aspect of the resize() call, as we expect the caller to
+// immediately write over this memory, but there is no other way to set the size
+// of the string, and not doing that will mean people who access |str| rather
+// than str.c_str() will get back a string of whatever size |str| had on entry
+// to this function (probably 0).
+BASE_EXPORT char* WriteInto(std::string* str, size_t length_with_null);
+BASE_EXPORT char16* WriteInto(string16* str, size_t length_with_null);
+
+// Joins a vector or list of strings into a single string, inserting |separator|
+// (which may be empty) in between all elements.
+//
+// Note this is inverse of SplitString()/SplitStringPiece() defined in
+// string_split.h.
+//
+// If possible, callers should build a vector of StringPieces and use the
+// StringPiece variant, so that they do not create unnecessary copies of
+// strings. For example, instead of using SplitString, modifying the vector,
+// then using JoinString, use SplitStringPiece followed by JoinString so that no
+// copies of those strings are created until the final join operation.
+//
+// Use StrCat (in base/strings/strcat.h) if you don't need a separator.
+BASE_EXPORT std::string JoinString(const std::vector<std::string>& parts,
+ StringPiece separator);
+BASE_EXPORT string16 JoinString(const std::vector<string16>& parts,
+ StringPiece16 separator);
+BASE_EXPORT std::string JoinString(const std::vector<StringPiece>& parts,
+ StringPiece separator);
+BASE_EXPORT string16 JoinString(const std::vector<StringPiece16>& parts,
+ StringPiece16 separator);
+// Explicit initializer_list overloads are required to break ambiguity when used
+// with a literal initializer list (otherwise the compiler would not be able to
+// decide between the string and StringPiece overloads).
+BASE_EXPORT std::string JoinString(std::initializer_list<StringPiece> parts,
+ StringPiece separator);
+BASE_EXPORT string16 JoinString(std::initializer_list<StringPiece16> parts,
+ StringPiece16 separator);
+
+#if defined(OS_WIN) && defined(BASE_STRING16_IS_STD_U16STRING)
+BASE_EXPORT TrimPositions TrimWhitespace(WStringPiece input,
+ TrimPositions positions,
+ std::wstring* output);
+
+BASE_EXPORT WStringPiece TrimWhitespace(WStringPiece input,
+ TrimPositions positions);
+
+BASE_EXPORT bool TrimString(WStringPiece input,
+ WStringPiece trim_chars,
+ std::wstring* output);
+
+BASE_EXPORT WStringPiece TrimString(WStringPiece input,
+ WStringPiece trim_chars,
+ TrimPositions positions);
+
+BASE_EXPORT wchar_t* WriteInto(std::wstring* str, size_t length_with_null);
+#endif
+
+} // namespace base
+
+#if defined(OS_WIN)
+#include "base/strings/string_util_win.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "base/strings/string_util_posix.h"
+#else
+#error Define string operations appropriately for your platform
+#endif
+
+#endif // BASE_STRINGS_STRING_UTIL_H_
diff --git a/security/sandbox/chromium/base/strings/string_util_constants.cc b/security/sandbox/chromium/base/strings/string_util_constants.cc
new file mode 100644
index 0000000000..212c5ab082
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_util_constants.cc
@@ -0,0 +1,54 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+
+namespace base {
+
+#define WHITESPACE_ASCII_NO_CR_LF \
+ 0x09, /* CHARACTER TABULATION */ \
+ 0x0B, /* LINE TABULATION */ \
+ 0x0C, /* FORM FEED (FF) */ \
+ 0x20 /* SPACE */
+
+#define WHITESPACE_ASCII \
+ WHITESPACE_ASCII_NO_CR_LF, /* Comment to make clang-format linebreak */ \
+ 0x0A, /* LINE FEED (LF) */ \
+ 0x0D /* CARRIAGE RETURN (CR) */
+
+#define WHITESPACE_UNICODE_NON_ASCII \
+ 0x0085, /* NEXT LINE (NEL) */ \
+ 0x00A0, /* NO-BREAK SPACE */ \
+ 0x1680, /* OGHAM SPACE MARK */ \
+ 0x2000, /* EN QUAD */ \
+ 0x2001, /* EM QUAD */ \
+ 0x2002, /* EN SPACE */ \
+ 0x2003, /* EM SPACE */ \
+ 0x2004, /* THREE-PER-EM SPACE */ \
+ 0x2005, /* FOUR-PER-EM SPACE */ \
+ 0x2006, /* SIX-PER-EM SPACE */ \
+ 0x2007, /* FIGURE SPACE */ \
+ 0x2008, /* PUNCTUATION SPACE */ \
+ 0x2009, /* THIN SPACE */ \
+ 0x200A, /* HAIR SPACE */ \
+ 0x2028, /* LINE SEPARATOR */ \
+ 0x2029, /* PARAGRAPH SEPARATOR */ \
+ 0x202F, /* NARROW NO-BREAK SPACE */ \
+ 0x205F, /* MEDIUM MATHEMATICAL SPACE */ \
+ 0x3000 /* IDEOGRAPHIC SPACE */
+
+#define WHITESPACE_UNICODE_NO_CR_LF \
+ WHITESPACE_ASCII_NO_CR_LF, WHITESPACE_UNICODE_NON_ASCII
+
+#define WHITESPACE_UNICODE WHITESPACE_ASCII, WHITESPACE_UNICODE_NON_ASCII
+
+const wchar_t kWhitespaceWide[] = {WHITESPACE_UNICODE, 0};
+const char16 kWhitespaceUTF16[] = {WHITESPACE_UNICODE, 0};
+const char16 kWhitespaceNoCrLfUTF16[] = {WHITESPACE_UNICODE_NO_CR_LF, 0};
+const char kWhitespaceASCII[] = {WHITESPACE_ASCII, 0};
+const char16 kWhitespaceASCIIAs16[] = {WHITESPACE_ASCII, 0};
+
+const char kUtf8ByteOrderMark[] = "\xEF\xBB\xBF";
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/strings/string_util_posix.h b/security/sandbox/chromium/base/strings/string_util_posix.h
new file mode 100644
index 0000000000..8299118e10
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_util_posix.h
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_UTIL_POSIX_H_
+#define BASE_STRINGS_STRING_UTIL_POSIX_H_
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <wchar.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// Chromium code style is to not use malloc'd strings; this is only for use
+// for interaction with APIs that require it.
+inline char* strdup(const char* str) {
+ return ::strdup(str);
+}
+
+inline int vsnprintf(char* buffer, size_t size,
+ const char* format, va_list arguments) {
+ return ::vsnprintf(buffer, size, format, arguments);
+}
+
+inline int vswprintf(wchar_t* buffer, size_t size,
+ const wchar_t* format, va_list arguments) {
+ DCHECK(IsWprintfFormatPortable(format));
+ return ::vswprintf(buffer, size, format, arguments);
+}
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_UTIL_POSIX_H_
diff --git a/security/sandbox/chromium/base/strings/string_util_win.h b/security/sandbox/chromium/base/strings/string_util_win.h
new file mode 100644
index 0000000000..7f260bfc8b
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/string_util_win.h
@@ -0,0 +1,44 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_UTIL_WIN_H_
+#define BASE_STRINGS_STRING_UTIL_WIN_H_
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <wchar.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// Chromium code style is to not use malloc'd strings; this is only for use
+// for interaction with APIs that require it.
+inline char* strdup(const char* str) {
+ return _strdup(str);
+}
+
+inline int vsnprintf(char* buffer, size_t size,
+ const char* format, va_list arguments) {
+ int length = vsnprintf_s(buffer, size, size - 1, format, arguments);
+ if (length < 0)
+ return _vscprintf(format, arguments);
+ return length;
+}
+
+inline int vswprintf(wchar_t* buffer, size_t size,
+ const wchar_t* format, va_list arguments) {
+ DCHECK(IsWprintfFormatPortable(format));
+
+ int length = _vsnwprintf_s(buffer, size, size - 1, format, arguments);
+ if (length < 0)
+ return _vscwprintf(format, arguments);
+ return length;
+}
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_UTIL_WIN_H_
diff --git a/security/sandbox/chromium/base/strings/stringprintf.cc b/security/sandbox/chromium/base/strings/stringprintf.cc
new file mode 100644
index 0000000000..738cc63bbe
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/stringprintf.cc
@@ -0,0 +1,225 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/stringprintf.h"
+
+#include <errno.h>
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/scoped_clear_last_error.h"
+#include "base/stl_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Overloaded wrappers around vsnprintf and vswprintf. The buf_size parameter
+// is the size of the buffer. These return the number of characters in the
+// formatted string excluding the NUL terminator. If the buffer is not
+// large enough to accommodate the formatted string without truncation, they
+// return the number of characters that would be in the fully-formatted string
+// (vsnprintf, and vswprintf on Windows), or -1 (vswprintf on POSIX platforms).
+inline int vsnprintfT(char* buffer,
+ size_t buf_size,
+ const char* format,
+ va_list argptr) {
+ return base::vsnprintf(buffer, buf_size, format, argptr);
+}
+
+#if defined(OS_WIN)
+inline int vsnprintfT(wchar_t* buffer,
+ size_t buf_size,
+ const wchar_t* format,
+ va_list argptr) {
+ return base::vswprintf(buffer, buf_size, format, argptr);
+}
+inline int vsnprintfT(char16_t* buffer,
+ size_t buf_size,
+ const char16_t* format,
+ va_list argptr) {
+ return base::vswprintf(reinterpret_cast<wchar_t*>(buffer), buf_size,
+ reinterpret_cast<const wchar_t*>(format), argptr);
+}
+#endif
+
+// Templatized backend for StringPrintF/StringAppendF. This does not finalize
+// the va_list, the caller is expected to do that.
+template <class CharT>
+static void StringAppendVT(std::basic_string<CharT>* dst,
+ const CharT* format,
+ va_list ap) {
+ // First try with a small fixed size buffer.
+ // This buffer size should be kept in sync with StringUtilTest.GrowBoundary
+ // and StringUtilTest.StringPrintfBounds.
+ CharT stack_buf[1024];
+
+ va_list ap_copy;
+ va_copy(ap_copy, ap);
+
+ base::internal::ScopedClearLastError last_error;
+ int result = vsnprintfT(stack_buf, base::size(stack_buf), format, ap_copy);
+ va_end(ap_copy);
+
+ if (result >= 0 && result < static_cast<int>(base::size(stack_buf))) {
+ // It fit.
+ dst->append(stack_buf, result);
+ return;
+ }
+
+ // Repeatedly increase buffer size until it fits.
+ int mem_length = base::size(stack_buf);
+ while (true) {
+ if (result < 0) {
+#if defined(OS_WIN)
+ // On Windows, vsnprintfT always returns the number of characters in a
+ // fully-formatted string, so if we reach this point, something else is
+ // wrong and no amount of buffer-doubling is going to fix it.
+ return;
+#else
+ if (errno != 0 && errno != EOVERFLOW)
+ return;
+ // Try doubling the buffer size.
+ mem_length *= 2;
+#endif
+ } else {
+ // We need exactly "result + 1" characters.
+ mem_length = result + 1;
+ }
+
+ if (mem_length > 32 * 1024 * 1024) {
+ // That should be plenty, don't try anything larger. This protects
+ // against huge allocations when using vsnprintfT implementations that
+ // return -1 for reasons other than overflow without setting errno.
+ DLOG(WARNING) << "Unable to printf the requested string due to size.";
+ return;
+ }
+
+ std::vector<CharT> mem_buf(mem_length);
+
+ // NOTE: You can only use a va_list once. Since we're in a while loop, we
+ // need to make a new copy each time so we don't use up the original.
+ va_copy(ap_copy, ap);
+ result = vsnprintfT(&mem_buf[0], mem_length, format, ap_copy);
+ va_end(ap_copy);
+
+ if ((result >= 0) && (result < mem_length)) {
+ // It fit.
+ dst->append(&mem_buf[0], result);
+ return;
+ }
+ }
+}
+
+} // namespace
+
+std::string StringPrintf(const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ std::string result;
+ StringAppendV(&result, format, ap);
+ va_end(ap);
+ return result;
+}
+
+#if defined(OS_WIN)
+std::wstring StringPrintf(const wchar_t* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ std::wstring result;
+ StringAppendV(&result, format, ap);
+ va_end(ap);
+ return result;
+}
+
+std::u16string StringPrintf(const char16_t* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ std::u16string result;
+ StringAppendV(&result, format, ap);
+ va_end(ap);
+ return result;
+}
+#endif
+
+std::string StringPrintV(const char* format, va_list ap) {
+ std::string result;
+ StringAppendV(&result, format, ap);
+ return result;
+}
+
+const std::string& SStringPrintf(std::string* dst, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ dst->clear();
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+ return *dst;
+}
+
+#if defined(OS_WIN)
+const std::wstring& SStringPrintf(std::wstring* dst,
+ const wchar_t* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ dst->clear();
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+ return *dst;
+}
+
+const std::u16string& SStringPrintf(std::u16string* dst,
+ const char16_t* format,
+ ...) {
+ va_list ap;
+ va_start(ap, format);
+ dst->clear();
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+ return *dst;
+}
+#endif
+
+void StringAppendF(std::string* dst, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+}
+
+#if defined(OS_WIN)
+void StringAppendF(std::wstring* dst, const wchar_t* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+}
+
+void StringAppendF(std::u16string* dst, const char16_t* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+}
+#endif
+
+void StringAppendV(std::string* dst, const char* format, va_list ap) {
+ StringAppendVT(dst, format, ap);
+}
+
+#if defined(OS_WIN)
+void StringAppendV(std::wstring* dst, const wchar_t* format, va_list ap) {
+ StringAppendVT(dst, format, ap);
+}
+
+void StringAppendV(std::u16string* dst, const char16_t* format, va_list ap) {
+ StringAppendVT(dst, format, ap);
+}
+#endif
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/strings/stringprintf.h b/security/sandbox/chromium/base/strings/stringprintf.h
new file mode 100644
index 0000000000..a8d5bc84e9
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/stringprintf.h
@@ -0,0 +1,74 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRINGPRINTF_H_
+#define BASE_STRINGS_STRINGPRINTF_H_
+
+#include <stdarg.h> // va_list
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Return a C++ string given printf-like input.
+BASE_EXPORT std::string StringPrintf(const char* format, ...)
+ PRINTF_FORMAT(1, 2) WARN_UNUSED_RESULT;
+#if defined(OS_WIN)
+// Note: Unfortunately compile time checking of the format string for UTF-16
+// strings is not supported by any compiler, thus these functions should be used
+// carefully and sparingly. Also applies to SStringPrintf and StringAppendV
+// below.
+BASE_EXPORT std::wstring StringPrintf(const wchar_t* format, ...)
+ WPRINTF_FORMAT(1, 2) WARN_UNUSED_RESULT;
+BASE_EXPORT std::u16string StringPrintf(const char16_t* format, ...)
+ WPRINTF_FORMAT(1, 2) WARN_UNUSED_RESULT;
+#endif
+
+// Return a C++ string given vprintf-like input.
+BASE_EXPORT std::string StringPrintV(const char* format, va_list ap)
+ PRINTF_FORMAT(1, 0) WARN_UNUSED_RESULT;
+
+// Store result into a supplied string and return it.
+BASE_EXPORT const std::string& SStringPrintf(std::string* dst,
+ const char* format,
+ ...) PRINTF_FORMAT(2, 3);
+#if defined(OS_WIN)
+BASE_EXPORT const std::wstring& SStringPrintf(std::wstring* dst,
+ const wchar_t* format,
+ ...) WPRINTF_FORMAT(2, 3);
+BASE_EXPORT const std::u16string& SStringPrintf(std::u16string* dst,
+ const char16_t* format,
+ ...) WPRINTF_FORMAT(2, 3);
+#endif
+
+// Append result to a supplied string.
+BASE_EXPORT void StringAppendF(std::string* dst, const char* format, ...)
+ PRINTF_FORMAT(2, 3);
+#if defined(OS_WIN)
+BASE_EXPORT void StringAppendF(std::wstring* dst, const wchar_t* format, ...)
+ WPRINTF_FORMAT(2, 3);
+BASE_EXPORT void StringAppendF(std::u16string* dst, const char16_t* format, ...)
+ WPRINTF_FORMAT(2, 3);
+#endif
+
+// Lower-level routine that takes a va_list and appends to a specified
+// string. All other routines are just convenience wrappers around it.
+BASE_EXPORT void StringAppendV(std::string* dst, const char* format, va_list ap)
+ PRINTF_FORMAT(2, 0);
+#if defined(OS_WIN)
+BASE_EXPORT void StringAppendV(std::wstring* dst,
+ const wchar_t* format,
+ va_list ap) WPRINTF_FORMAT(2, 0);
+BASE_EXPORT void StringAppendV(std::u16string* dst,
+ const char16_t* format,
+ va_list ap) WPRINTF_FORMAT(2, 0);
+#endif
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRINGPRINTF_H_
diff --git a/security/sandbox/chromium/base/strings/utf_string_conversion_utils.cc b/security/sandbox/chromium/base/strings/utf_string_conversion_utils.cc
new file mode 100644
index 0000000000..f7682c1be9
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/utf_string_conversion_utils.cc
@@ -0,0 +1,155 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/utf_string_conversion_utils.h"
+
+#include "base/third_party/icu/icu_utf.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// ReadUnicodeCharacter --------------------------------------------------------
+
+bool ReadUnicodeCharacter(const char* src,
+ int32_t src_len,
+ int32_t* char_index,
+ uint32_t* code_point_out) {
+ // U8_NEXT expects to be able to use -1 to signal an error, so we must
+ // use a signed type for code_point. But this function returns false
+ // on error anyway, so code_point_out is unsigned.
+ int32_t code_point;
+ CBU8_NEXT(src, *char_index, src_len, code_point);
+ *code_point_out = static_cast<uint32_t>(code_point);
+
+ // The ICU macro above moves to the next char, we want to point to the last
+ // char consumed.
+ (*char_index)--;
+
+ // Validate the decoded value.
+ return IsValidCodepoint(code_point);
+}
+
+bool ReadUnicodeCharacter(const char16* src,
+ int32_t src_len,
+ int32_t* char_index,
+ uint32_t* code_point) {
+ if (CBU16_IS_SURROGATE(src[*char_index])) {
+ if (!CBU16_IS_SURROGATE_LEAD(src[*char_index]) ||
+ *char_index + 1 >= src_len ||
+ !CBU16_IS_TRAIL(src[*char_index + 1])) {
+ // Invalid surrogate pair.
+ return false;
+ }
+
+ // Valid surrogate pair.
+ *code_point = CBU16_GET_SUPPLEMENTARY(src[*char_index],
+ src[*char_index + 1]);
+ (*char_index)++;
+ } else {
+ // Not a surrogate, just one 16-bit word.
+ *code_point = src[*char_index];
+ }
+
+ return IsValidCodepoint(*code_point);
+}
+
+#if defined(WCHAR_T_IS_UTF32)
+bool ReadUnicodeCharacter(const wchar_t* src,
+ int32_t src_len,
+ int32_t* char_index,
+ uint32_t* code_point) {
+ // Conversion is easy since the source is 32-bit.
+ *code_point = src[*char_index];
+
+ // Validate the value.
+ return IsValidCodepoint(*code_point);
+}
+#endif // defined(WCHAR_T_IS_UTF32)
+
+// WriteUnicodeCharacter -------------------------------------------------------
+
+size_t WriteUnicodeCharacter(uint32_t code_point, std::string* output) {
+ if (code_point <= 0x7f) {
+ // Fast path the common case of one byte.
+ output->push_back(static_cast<char>(code_point));
+ return 1;
+ }
+
+
+ // CBU8_APPEND_UNSAFE can append up to 4 bytes.
+ size_t char_offset = output->length();
+ size_t original_char_offset = char_offset;
+ output->resize(char_offset + CBU8_MAX_LENGTH);
+
+ CBU8_APPEND_UNSAFE(&(*output)[0], char_offset, code_point);
+
+ // CBU8_APPEND_UNSAFE will advance our pointer past the inserted character, so
+ // it will represent the new length of the string.
+ output->resize(char_offset);
+ return char_offset - original_char_offset;
+}
+
+size_t WriteUnicodeCharacter(uint32_t code_point, string16* output) {
+ if (CBU16_LENGTH(code_point) == 1) {
+  // This code point is in the Basic Multilingual Plane (BMP).
+ output->push_back(static_cast<char16>(code_point));
+ return 1;
+ }
+ // Non-BMP characters use a double-character encoding.
+ size_t char_offset = output->length();
+ output->resize(char_offset + CBU16_MAX_LENGTH);
+ CBU16_APPEND_UNSAFE(&(*output)[0], char_offset, code_point);
+ return CBU16_MAX_LENGTH;
+}
+
+// Generalized Unicode converter -----------------------------------------------
+
+template<typename CHAR>
+void PrepareForUTF8Output(const CHAR* src,
+ size_t src_len,
+ std::string* output) {
+ output->clear();
+ if (src_len == 0)
+ return;
+ if (src[0] < 0x80) {
+ // Assume that the entire input will be ASCII.
+ output->reserve(src_len);
+ } else {
+ // Assume that the entire input is non-ASCII and will have 3 bytes per char.
+ output->reserve(src_len * 3);
+ }
+}
+
+// Instantiate versions we know callers will need.
+#if !defined(OS_WIN)
+// wchar_t and char16 are the same thing on Windows.
+template void PrepareForUTF8Output(const wchar_t*, size_t, std::string*);
+#endif
+template void PrepareForUTF8Output(const char16*, size_t, std::string*);
+
+template<typename STRING>
+void PrepareForUTF16Or32Output(const char* src,
+ size_t src_len,
+ STRING* output) {
+ output->clear();
+ if (src_len == 0)
+ return;
+ if (static_cast<unsigned char>(src[0]) < 0x80) {
+ // Assume the input is all ASCII, which means 1:1 correspondence.
+ output->reserve(src_len);
+ } else {
+ // Otherwise assume that the UTF-8 sequences will have 2 bytes for each
+ // character.
+ output->reserve(src_len / 2);
+ }
+}
+
+// Instantiate versions we know callers will need.
+#if !defined(OS_WIN)
+// std::wstring and string16 are the same thing on Windows.
+template void PrepareForUTF16Or32Output(const char*, size_t, std::wstring*);
+#endif
+template void PrepareForUTF16Or32Output(const char*, size_t, string16*);
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/strings/utf_string_conversion_utils.h b/security/sandbox/chromium/base/strings/utf_string_conversion_utils.h
new file mode 100644
index 0000000000..01d24c3e2e
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/utf_string_conversion_utils.h
@@ -0,0 +1,103 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
+#define BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
+
+// Low-level UTF handling functions. Most code will want to use the functions
+// in utf_string_conversions.h
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+
+namespace base {
+
+inline bool IsValidCodepoint(uint32_t code_point) {
+ // Excludes code points that are not Unicode scalar values, i.e.
+ // surrogate code points ([0xD800, 0xDFFF]). Additionally, excludes
+ // code points larger than 0x10FFFF (the highest codepoint allowed).
+ // Non-characters and unassigned code points are allowed.
+ // https://unicode.org/glossary/#unicode_scalar_value
+ return code_point < 0xD800u ||
+ (code_point >= 0xE000u && code_point <= 0x10FFFFu);
+}
+
+inline bool IsValidCharacter(uint32_t code_point) {
+ // Excludes non-characters (U+FDD0..U+FDEF, and all code points
+ // ending in 0xFFFE or 0xFFFF) from the set of valid code points.
+ // https://unicode.org/faq/private_use.html#nonchar1
+ return code_point < 0xD800u || (code_point >= 0xE000u &&
+ code_point < 0xFDD0u) || (code_point > 0xFDEFu &&
+ code_point <= 0x10FFFFu && (code_point & 0xFFFEu) != 0xFFFEu);
+}
+
+// ReadUnicodeCharacter --------------------------------------------------------
+
+// Reads a UTF-8 stream, placing the next code point into the given output
+// |*code_point|. |src| represents the entire string to read, and |*char_index|
+// is the character offset within the string to start reading at. |*char_index|
+// will be updated to index the last character read, such that incrementing it
+// (as in a for loop) will take the reader to the next character.
+//
+// Returns true on success. On false, |*code_point| will be invalid.
+BASE_EXPORT bool ReadUnicodeCharacter(const char* src,
+ int32_t src_len,
+ int32_t* char_index,
+ uint32_t* code_point_out);
+
+// Reads a UTF-16 character. The usage is the same as the 8-bit version above.
+BASE_EXPORT bool ReadUnicodeCharacter(const char16* src,
+ int32_t src_len,
+ int32_t* char_index,
+ uint32_t* code_point);
+
+#if defined(WCHAR_T_IS_UTF32)
+// Reads UTF-32 character. The usage is the same as the 8-bit version above.
+BASE_EXPORT bool ReadUnicodeCharacter(const wchar_t* src,
+ int32_t src_len,
+ int32_t* char_index,
+ uint32_t* code_point);
+#endif // defined(WCHAR_T_IS_UTF32)
+
+// WriteUnicodeCharacter -------------------------------------------------------
+
+// Appends a UTF-8 character to the given 8-bit string. Returns the number of
+// bytes written.
+BASE_EXPORT size_t WriteUnicodeCharacter(uint32_t code_point,
+ std::string* output);
+
+// Appends the given code point as a UTF-16 character to the given 16-bit
+// string. Returns the number of 16-bit values written.
+BASE_EXPORT size_t WriteUnicodeCharacter(uint32_t code_point, string16* output);
+
+#if defined(WCHAR_T_IS_UTF32)
+// Appends the given UTF-32 character to the given 32-bit string. Returns the
+// number of 32-bit values written.
+inline size_t WriteUnicodeCharacter(uint32_t code_point, std::wstring* output) {
+ // This is the easy case, just append the character.
+ output->push_back(code_point);
+ return 1;
+}
+#endif // defined(WCHAR_T_IS_UTF32)
+
+// Generalized Unicode converter -----------------------------------------------
+
+// Guesses the length of the output in UTF-8 in bytes, clears that output
+// string, and reserves that amount of space. We assume that the input
+// character types are unsigned, which will be true for UTF-16 and -32 on our
+// systems.
+template<typename CHAR>
+void PrepareForUTF8Output(const CHAR* src, size_t src_len, std::string* output);
+
+// Prepares an output buffer (containing either UTF-16 or -32 data) given some
+// UTF-8 input that will be converted to it. See PrepareForUTF8Output().
+template<typename STRING>
+void PrepareForUTF16Or32Output(const char* src, size_t src_len, STRING* output);
+
+} // namespace base
+
+#endif // BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
diff --git a/security/sandbox/chromium/base/strings/utf_string_conversions.cc b/security/sandbox/chromium/base/strings/utf_string_conversions.cc
new file mode 100644
index 0000000000..9a79889159
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/utf_string_conversions.cc
@@ -0,0 +1,342 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/utf_string_conversions.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include <type_traits>
+
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "base/third_party/icu/icu_utf.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+constexpr int32_t kErrorCodePoint = 0xFFFD;
+
+// Size coefficient ----------------------------------------------------------
+// The maximum number of codeunits in the destination encoding corresponding to
+// one codeunit in the source encoding.
+
+template <typename SrcChar, typename DestChar>
+struct SizeCoefficient {
+  static_assert(sizeof(SrcChar) < sizeof(DestChar),
+                "Default case: from a smaller encoding to the bigger one");
+
+  // ASCII symbols are encoded by one codeunit in all encodings.
+  static constexpr int value = 1;  // Growing encodings never need extra units per source unit.
+};
+
+template <>
+struct SizeCoefficient<char16, char> {
+  // One UTF-16 codeunit corresponds to at most 3 codeunits in UTF-8.
+  static constexpr int value = 3;  // 4-byte UTF-8 sequences come from surrogate *pairs* (2 units).
+};
+
+#if defined(WCHAR_T_IS_UTF32)
+template <>
+struct SizeCoefficient<wchar_t, char> {
+  // UTF-8 uses at most 4 codeunits per character.
+  static constexpr int value = 4;
+};
+
+template <>
+struct SizeCoefficient<wchar_t, char16> {
+  // UTF-16 uses at most 2 codeunits per character.
+  static constexpr int value = 2;
+};
+#endif // defined(WCHAR_T_IS_UTF32)
+
+template <typename SrcChar, typename DestChar>
+constexpr int size_coefficient_v =  // Convenience alias; decay_t strips cv/ref qualifiers.
+    SizeCoefficient<std::decay_t<SrcChar>, std::decay_t<DestChar>>::value;
+
+// UnicodeAppendUnsafe --------------------------------------------------------
+// Function overloads that write code_point to the output string. Output string
+// has to have enough space for the codepoint.
+
+// Convenience typedef that checks whether the passed in type is integral (i.e.
+// bool, char, int or their extended versions) and is of the correct size.
+template <typename Char, size_t N>
+using EnableIfBitsAre = std::enable_if_t<std::is_integral<Char>::value &&
+                                             CHAR_BIT * sizeof(Char) == N,
+                                         bool>;
+
+template <typename Char, EnableIfBitsAre<Char, 8> = true>
+void UnicodeAppendUnsafe(Char* out, int32_t* size, uint32_t code_point) {
+  CBU8_APPEND_UNSAFE(out, *size, code_point);  // Writes 1-4 UTF-8 bytes and advances *size.
+}
+
+template <typename Char, EnableIfBitsAre<Char, 16> = true>
+void UnicodeAppendUnsafe(Char* out, int32_t* size, uint32_t code_point) {
+  CBU16_APPEND_UNSAFE(out, *size, code_point);  // Writes 1-2 UTF-16 units and advances *size.
+}
+
+template <typename Char, EnableIfBitsAre<Char, 32> = true>
+void UnicodeAppendUnsafe(Char* out, int32_t* size, uint32_t code_point) {
+  out[(*size)++] = code_point;  // UTF-32: exactly one unit per code point.
+}
+
+// DoUTFConversion ------------------------------------------------------------
+// Main driver of UTFConversion specialized for different Src encodings.
+// dest has to have enough room for the converted text.
+
+template <typename DestChar>
+bool DoUTFConversion(const char* src,  // UTF-8 source; returns false if any input was invalid.
+                     int32_t src_len,
+                     DestChar* dest,
+                     int32_t* dest_len) {
+  bool success = true;
+
+  for (int32_t i = 0; i < src_len;) {
+    int32_t code_point;
+    CBU8_NEXT(src, i, src_len, code_point);  // Decodes one code point and advances |i|.
+
+    if (!IsValidCodepoint(code_point)) {
+      success = false;
+      code_point = kErrorCodePoint;  // Substitute U+FFFD and keep converting.
+    }
+
+    UnicodeAppendUnsafe(dest, dest_len, code_point);
+  }
+
+  return success;
+}
+
+template <typename DestChar>
+bool DoUTFConversion(const char16* src,  // UTF-16 source; returns false if any input was invalid.
+                     int32_t src_len,
+                     DestChar* dest,
+                     int32_t* dest_len) {
+  bool success = true;
+
+  auto ConvertSingleChar = [&success](char16 in) -> int32_t {  // Unpaired surrogates map to U+FFFD.
+    if (!CBU16_IS_SINGLE(in) || !IsValidCodepoint(in)) {
+      success = false;
+      return kErrorCodePoint;
+    }
+    return in;
+  };
+
+  int32_t i = 0;
+
+  // Always have another symbol in order to avoid checking boundaries in the
+  // middle of the surrogate pair.
+  while (i < src_len - 1) {
+    int32_t code_point;
+
+    if (CBU16_IS_LEAD(src[i]) && CBU16_IS_TRAIL(src[i + 1])) {
+      code_point = CBU16_GET_SUPPLEMENTARY(src[i], src[i + 1]);  // Combine the pair.
+      if (!IsValidCodepoint(code_point)) {
+        code_point = kErrorCodePoint;
+        success = false;
+      }
+      i += 2;  // Consumed both units of the pair.
+    } else {
+      code_point = ConvertSingleChar(src[i]);
+      ++i;
+    }
+
+    UnicodeAppendUnsafe(dest, dest_len, code_point);
+  }
+
+  if (i < src_len)  // Trailing lone unit (cannot start a pair at the end).
+    UnicodeAppendUnsafe(dest, dest_len, ConvertSingleChar(src[i]));
+
+  return success;
+}
+
+#if defined(WCHAR_T_IS_UTF32)
+
+template <typename DestChar>
+bool DoUTFConversion(const wchar_t* src,  // UTF-32 source; returns false if any input was invalid.
+                     int32_t src_len,
+                     DestChar* dest,
+                     int32_t* dest_len) {
+  bool success = true;
+
+  for (int32_t i = 0; i < src_len; ++i) {
+    int32_t code_point = src[i];  // Each UTF-32 unit is one candidate code point.
+
+    if (!IsValidCodepoint(code_point)) {
+      success = false;
+      code_point = kErrorCodePoint;  // Substitute U+FFFD and keep converting.
+    }
+
+    UnicodeAppendUnsafe(dest, dest_len, code_point);
+  }
+
+  return success;
+}
+
+#endif // defined(WCHAR_T_IS_UTF32)
+
+// UTFConversion --------------------------------------------------------------
+// Function template for generating all UTF conversions.
+
+template <typename InputString, typename DestString>
+bool UTFConversion(const InputString& src_str, DestString* dest_str) {
+  if (IsStringASCII(src_str)) {  // Fast path: ASCII converts by plain element copy.
+    dest_str->assign(src_str.begin(), src_str.end());
+    return true;
+  }
+
+  dest_str->resize(src_str.length() *  // Reserve worst-case output size up front.
+                   size_coefficient_v<typename InputString::value_type,
+                                      typename DestString::value_type>);
+
+  // Input was not ASCII, hence non-empty => it is OK to call operator[].
+  auto* dest = &(*dest_str)[0];
+
+  // ICU requires 32 bit numbers.
+  int32_t src_len32 = static_cast<int32_t>(src_str.length());
+  int32_t dest_len32 = 0;
+
+  bool res = DoUTFConversion(src_str.data(), src_len32, dest, &dest_len32);
+
+  dest_str->resize(dest_len32);  // Trim to the actual converted length.
+  dest_str->shrink_to_fit();  // Release the worst-case over-allocation.
+
+  return res;
+}
+
+} // namespace
+
+// UTF16 <-> UTF8 --------------------------------------------------------------
+
+bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {  // Returns false on invalid input.
+  return UTFConversion(StringPiece(src, src_len), output);
+}
+
+string16 UTF8ToUTF16(StringPiece utf8) {
+  string16 ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  UTF8ToUTF16(utf8.data(), utf8.size(), &ret);
+  return ret;
+}
+
+bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {  // Returns false on invalid input.
+  return UTFConversion(StringPiece16(src, src_len), output);
+}
+
+std::string UTF16ToUTF8(StringPiece16 utf16) {
+  std::string ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  UTF16ToUTF8(utf16.data(), utf16.length(), &ret);
+  return ret;
+}
+
+// UTF-16 <-> Wide -------------------------------------------------------------
+
+#if defined(WCHAR_T_IS_UTF16)
+// When wide == UTF-16 the conversions are a NOP.
+
+bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
+  output->assign(src, src + src_len);  // Element-wise copy: wchar_t and char16 are both 16-bit here.
+  return true;
+}
+
+string16 WideToUTF16(WStringPiece wide) {
+  return string16(wide.begin(), wide.end());
+}
+
+bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
+  output->assign(src, src + src_len);  // Element-wise copy, no validation needed.
+  return true;
+}
+
+std::wstring UTF16ToWide(StringPiece16 utf16) {
+  return std::wstring(utf16.begin(), utf16.end());
+}
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {  // Returns false on invalid input.
+  return UTFConversion(base::WStringPiece(src, src_len), output);
+}
+
+string16 WideToUTF16(WStringPiece wide) {
+  string16 ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  WideToUTF16(wide.data(), wide.length(), &ret);
+  return ret;
+}
+
+bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {  // Returns false on invalid input.
+  return UTFConversion(StringPiece16(src, src_len), output);
+}
+
+std::wstring UTF16ToWide(StringPiece16 utf16) {
+  std::wstring ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  UTF16ToWide(utf16.data(), utf16.length(), &ret);
+  return ret;
+}
+
+#endif // defined(WCHAR_T_IS_UTF32)
+
+// UTF-8 <-> Wide --------------------------------------------------------------
+
+// UTF8ToWide is the same code, regardless of whether wide is 16 or 32 bits
+
+bool UTF8ToWide(const char* src, size_t src_len, std::wstring* output) {  // Returns false on invalid input.
+  return UTFConversion(StringPiece(src, src_len), output);
+}
+
+std::wstring UTF8ToWide(StringPiece utf8) {
+  std::wstring ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  UTF8ToWide(utf8.data(), utf8.length(), &ret);
+  return ret;
+}
+
+#if defined(WCHAR_T_IS_UTF16)
+// Easy case since we can use the "utf" versions we already wrote above.
+
+bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {
+  return UTF16ToUTF8(as_u16cstr(src), src_len, output);  // Reinterpret wchar_t* as char16*.
+}
+
+std::string WideToUTF8(WStringPiece wide) {
+  return UTF16ToUTF8(StringPiece16(as_u16cstr(wide), wide.size()));
+}
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {  // Returns false on invalid input.
+  return UTFConversion(WStringPiece(src, src_len), output);
+}
+
+std::string WideToUTF8(WStringPiece wide) {
+  std::string ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  WideToUTF8(wide.data(), wide.length(), &ret);
+  return ret;
+}
+
+#endif // defined(WCHAR_T_IS_UTF32)
+
+string16 ASCIIToUTF16(StringPiece ascii) {
+  DCHECK(IsStringASCII(ascii)) << ascii;  // Caller must guarantee 7-bit input.
+  return string16(ascii.begin(), ascii.end());  // Plain widening copy is valid for ASCII.
+}
+
+std::string UTF16ToASCII(StringPiece16 utf16) {
+  DCHECK(IsStringASCII(utf16)) << UTF16ToUTF8(utf16);  // Caller must guarantee 7-bit input.
+  return std::string(utf16.begin(), utf16.end());  // Narrowing copy truncates each 16-bit unit.
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/strings/utf_string_conversions.h b/security/sandbox/chromium/base/strings/utf_string_conversions.h
new file mode 100644
index 0000000000..f780fb4f4f
--- /dev/null
+++ b/security/sandbox/chromium/base/strings/utf_string_conversions.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_UTF_STRING_CONVERSIONS_H_
+#define BASE_STRINGS_UTF_STRING_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// These convert between UTF-8, -16, and -32 strings. They are potentially slow,
+// so avoid unnecessary conversions. The low-level versions return a boolean
+// indicating whether the conversion was 100% valid. In this case, it will still
+// do the best it can and put the result in the output buffer. The versions that
+// return strings ignore this error and just return the best conversion
+// possible.
+BASE_EXPORT bool WideToUTF8(const wchar_t* src, size_t src_len,
+ std::string* output);
+BASE_EXPORT std::string WideToUTF8(WStringPiece wide) WARN_UNUSED_RESULT;
+BASE_EXPORT bool UTF8ToWide(const char* src, size_t src_len,
+ std::wstring* output);
+BASE_EXPORT std::wstring UTF8ToWide(StringPiece utf8) WARN_UNUSED_RESULT;
+
+BASE_EXPORT bool WideToUTF16(const wchar_t* src, size_t src_len,
+ string16* output);
+BASE_EXPORT string16 WideToUTF16(WStringPiece wide) WARN_UNUSED_RESULT;
+BASE_EXPORT bool UTF16ToWide(const char16* src, size_t src_len,
+ std::wstring* output);
+BASE_EXPORT std::wstring UTF16ToWide(StringPiece16 utf16) WARN_UNUSED_RESULT;
+
+BASE_EXPORT bool UTF8ToUTF16(const char* src, size_t src_len, string16* output);
+BASE_EXPORT string16 UTF8ToUTF16(StringPiece utf8) WARN_UNUSED_RESULT;
+BASE_EXPORT bool UTF16ToUTF8(const char16* src, size_t src_len,
+ std::string* output);
+BASE_EXPORT std::string UTF16ToUTF8(StringPiece16 utf16) WARN_UNUSED_RESULT;
+
+// This converts an ASCII string, typically a hardcoded constant, to a UTF16
+// string.
+BASE_EXPORT string16 ASCIIToUTF16(StringPiece ascii) WARN_UNUSED_RESULT;
+
+// Converts to 7-bit ASCII by truncating. The result must be known to be ASCII
+// beforehand.
+BASE_EXPORT std::string UTF16ToASCII(StringPiece16 utf16) WARN_UNUSED_RESULT;
+
+} // namespace base
+
+#endif // BASE_STRINGS_UTF_STRING_CONVERSIONS_H_
diff --git a/security/sandbox/chromium/base/synchronization/atomic_flag.h b/security/sandbox/chromium/base/synchronization/atomic_flag.h
new file mode 100644
index 0000000000..f386a16cbd
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/atomic_flag.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
+#define BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
+
+#include <stdint.h>
+
+#include <atomic>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/sequence_checker.h"
+
+namespace base {
+
+// A flag that can safely be set from one thread and read from other threads.
+//
+// This class IS NOT intended for synchronization between threads.
+class BASE_EXPORT AtomicFlag {
+ public:
+  AtomicFlag();
+  ~AtomicFlag();
+
+  // Set the flag. Must always be called from the same sequence.
+  void Set();
+
+  // Returns true iff the flag was set. If this returns true, the current thread
+  // is guaranteed to be synchronized with all memory operations on the sequence
+  // which invoked Set() up until at least the first call to Set() on it.
+  bool IsSet() const {
+    // Inline here: this has a measurable performance impact on base::WeakPtr.
+    return flag_.load(std::memory_order_acquire) != 0;  // Acquire pairs with the release in Set().
+  }
+
+  // Resets the flag. Be careful when using this: callers might not expect
+  // IsSet() to return false after returning true once.
+  void UnsafeResetForTesting();
+
+ private:
+  std::atomic<uint_fast8_t> flag_{0};  // Non-zero once Set() has been called.
+  SEQUENCE_CHECKER(set_sequence_checker_);  // Enforces Set()'s same-sequence contract.
+
+  DISALLOW_COPY_AND_ASSIGN(AtomicFlag);
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
diff --git a/security/sandbox/chromium/base/synchronization/condition_variable.h b/security/sandbox/chromium/base/synchronization/condition_variable.h
new file mode 100644
index 0000000000..d92b738081
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/condition_variable.h
@@ -0,0 +1,135 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ConditionVariable wraps pthreads condition variable synchronization or, on
+// Windows, simulates it. This functionality is very helpful for having
+// several threads wait for an event, as is common with a thread pool managed
+// by a master. The meaning of such an event in the (worker) thread pool
+// scenario is that additional tasks are now available for processing. It is
+// used in Chrome in the DNS prefetching system to notify worker threads that
+// a queue now has items (tasks) which need to be tended to. A related use
+// would have a pool manager waiting on a ConditionVariable, waiting for a
+// thread in the pool to announce (signal) that there is now more room in a
+// (bounded size) communications queue for the manager to deposit tasks, or,
+// as a second example, that the queue of tasks is completely empty and all
+// workers are waiting.
+//
+// USAGE NOTE 1: spurious signal events are possible with this and
+// most implementations of condition variables. As a result, be
+// *sure* to retest your condition before proceeding. The following
+// is a good example of doing this correctly:
+//
+// while (!work_to_be_done()) Wait(...);
+//
+// In contrast do NOT do the following:
+//
+// if (!work_to_be_done()) Wait(...); // Don't do this.
+//
+// Especially avoid the above if you are relying on some other thread only
+// issuing a signal up *if* there is work-to-do. There can/will
+// be spurious signals. Recheck state on waiting thread before
+// assuming the signal was intentional. Caveat caller ;-).
+//
+// USAGE NOTE 2: Broadcast() frees up all waiting threads at once,
+// which leads to contention for the locks they all held when they
+// called Wait(). This results in POOR performance. A much better
+// approach to getting a lot of threads out of Wait() is to have each
+// thread (upon exiting Wait()) call Signal() to free up another
+// Wait'ing thread. Look at condition_variable_unittest.cc for
+// both examples.
+//
+// Broadcast() can be used nicely during teardown, as it gets the job
+// done, and leaves no sleeping threads... and performance is less
+// critical at that point.
+//
+// The semantics of Broadcast() are carefully crafted so that *all*
+// threads that were waiting when the request was made will indeed
+// get signaled. Some implementations mess up, and don't signal them
+// all, while others allow the wait to be effectively turned off (for
+// a while while waiting threads come around). This implementation
+// appears correct, as it will not "lose" any signals, and will guarantee
+// that all threads get signaled by Broadcast().
+//
+// This implementation offers support for "performance" in its selection of
+// which thread to revive. Performance, in direct contrast with "fairness,"
+// assures that the thread that most recently began to Wait() is selected by
+// Signal to revive. Fairness would (if publicly supported) assure that the
+// thread that has Wait()ed the longest is selected. The default policy
+// may improve performance, as the selected thread may have a greater chance of
+// having some of its stack data in various CPU caches.
+
+#ifndef BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
+#define BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <pthread.h>
+#endif
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#endif
+
+namespace base {
+
+class TimeDelta;
+
+class BASE_EXPORT ConditionVariable {
+ public:
+  // Construct a cv for use with ONLY one user lock.
+  explicit ConditionVariable(Lock* user_lock);
+
+  ~ConditionVariable();
+
+  // Wait() releases the caller's critical section atomically as it starts to
+  // sleep, and then reacquires it when it is signaled. The wait functions are
+  // susceptible to spurious wakeups. (See usage note 1 for more details.)
+  void Wait();
+  void TimedWait(const TimeDelta& max_time);
+
+  // Broadcast() revives all waiting threads. (See usage note 2 for more
+  // details.)
+  void Broadcast();
+  // Signal() revives one waiting thread.
+  void Signal();
+
+  // Declares that this ConditionVariable will only ever be used by a thread
+  // that is idle at the bottom of its stack and waiting for work (in
+  // particular, it is not synchronously waiting on this ConditionVariable
+  // before resuming ongoing work). This is useful to avoid telling
+  // base-internals that this thread is "blocked" when it's merely idle and
+  // ready to do work. As such, this is only expected to be used by thread and
+  // thread pool impls.
+  void declare_only_used_while_idle() { waiting_is_blocking_ = false; }
+
+ private:
+
+#if defined(OS_WIN)
+  CHROME_CONDITION_VARIABLE cv_;
+  CHROME_SRWLOCK* const srwlock_;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  pthread_cond_t condition_;
+  pthread_mutex_t* user_mutex_;  // Borrowed from the Lock passed at construction.
+#endif
+
+#if DCHECK_IS_ON()
+  base::Lock* const user_lock_; // Needed to adjust shadow lock state on wait.
+#endif
+
+  // Whether a thread invoking Wait() on this ConditionVariable should be
+  // considered blocked as opposed to idle (and potentially replaced if part of
+  // a pool).
+  bool waiting_is_blocking_ = true;
+
+  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
diff --git a/security/sandbox/chromium/base/synchronization/condition_variable_posix.cc b/security/sandbox/chromium/base/synchronization/condition_variable_posix.cc
new file mode 100644
index 0000000000..189eb360d2
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/condition_variable_posix.cc
@@ -0,0 +1,149 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/condition_variable.h"
+
+#include <errno.h>
+#include <stdint.h>
+#include <sys/time.h>
+
+#include "base/optional.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+ConditionVariable::ConditionVariable(Lock* user_lock)
+    : user_mutex_(user_lock->lock_.native_handle())
+#if DCHECK_IS_ON()
+    , user_lock_(user_lock)
+#endif
+{
+  int rv = 0;
+  // http://crbug.com/293736
+  // NaCl doesn't support monotonic clock based absolute deadlines.
+  // On older Android platform versions, it's supported through the
+  // non-standard pthread_cond_timedwait_monotonic_np. Newer platform
+  // versions have pthread_condattr_setclock.
+  // Mac can use relative time deadlines.
+#if !defined(OS_MACOSX) && !defined(OS_NACL) && \
+    !(defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC))
+  pthread_condattr_t attrs;
+  rv = pthread_condattr_init(&attrs);
+  DCHECK_EQ(0, rv);
+  pthread_condattr_setclock(&attrs, CLOCK_MONOTONIC);  // NOTE(review): return value unchecked, unlike the other pthread calls here.
+  rv = pthread_cond_init(&condition_, &attrs);
+  pthread_condattr_destroy(&attrs);  // Safe once the cv is initialized.
+#else
+  rv = pthread_cond_init(&condition_, NULL);
+#endif
+  DCHECK_EQ(0, rv);
+}
+
+ConditionVariable::~ConditionVariable() {
+#if defined(OS_MACOSX)
+  // This hack is necessary to avoid a fatal pthreads subsystem bug in the
+  // Darwin kernel. http://crbug.com/517681.
+  {
+    base::Lock lock;  // Throwaway lock: the wait just needs some held mutex.
+    base::AutoLock l(lock);
+    struct timespec ts;
+    ts.tv_sec = 0;
+    ts.tv_nsec = 1;  // 1ns timeout so the wait returns almost immediately.
+    pthread_cond_timedwait_relative_np(&condition_, lock.lock_.native_handle(),
+                                       &ts);
+  }
+#endif
+
+  int rv = pthread_cond_destroy(&condition_);
+  DCHECK_EQ(0, rv);
+}
+
+void ConditionVariable::Wait() {
+  Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
+      scoped_blocking_call;
+  if (waiting_is_blocking_)
+    scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
+
+#if DCHECK_IS_ON()
+  user_lock_->CheckHeldAndUnmark();  // pthread_cond_wait releases the mutex; clear shadow ownership first.
+#endif
+  int rv = pthread_cond_wait(&condition_, user_mutex_);
+  DCHECK_EQ(0, rv);
+#if DCHECK_IS_ON()
+  user_lock_->CheckUnheldAndMark();  // Mutex is reacquired on return; restore shadow ownership.
+#endif
+}
+
+void ConditionVariable::TimedWait(const TimeDelta& max_time) {
+  Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
+      scoped_blocking_call;
+  if (waiting_is_blocking_)
+    scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
+
+  int64_t usecs = max_time.InMicroseconds();
+  struct timespec relative_time;  // Split the delta into whole seconds + leftover nanoseconds.
+  relative_time.tv_sec = usecs / Time::kMicrosecondsPerSecond;
+  relative_time.tv_nsec =
+      (usecs % Time::kMicrosecondsPerSecond) * Time::kNanosecondsPerMicrosecond;
+
+#if DCHECK_IS_ON()
+  user_lock_->CheckHeldAndUnmark();  // Waiting releases the mutex; clear shadow ownership first.
+#endif
+
+#if defined(OS_MACOSX)
+  int rv = pthread_cond_timedwait_relative_np(
+      &condition_, user_mutex_, &relative_time);
+#else
+  // The timeout argument to pthread_cond_timedwait is in absolute time.
+  struct timespec absolute_time;
+#if defined(OS_NACL)
+  // See comment in constructor for why this is different in NaCl.
+  struct timeval now;
+  gettimeofday(&now, NULL);
+  absolute_time.tv_sec = now.tv_sec;
+  absolute_time.tv_nsec = now.tv_usec * Time::kNanosecondsPerMicrosecond;
+#else
+  struct timespec now;
+  clock_gettime(CLOCK_MONOTONIC, &now);  // Must match the clock set in the constructor.
+  absolute_time.tv_sec = now.tv_sec;
+  absolute_time.tv_nsec = now.tv_nsec;
+#endif
+
+  absolute_time.tv_sec += relative_time.tv_sec;
+  absolute_time.tv_nsec += relative_time.tv_nsec;
+  absolute_time.tv_sec += absolute_time.tv_nsec / Time::kNanosecondsPerSecond;  // Carry overflow into seconds.
+  absolute_time.tv_nsec %= Time::kNanosecondsPerSecond;  // Normalize nsec into [0, 1s).
+  DCHECK_GE(absolute_time.tv_sec, now.tv_sec); // Overflow paranoia
+
+#if defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
+  int rv = pthread_cond_timedwait_monotonic_np(
+      &condition_, user_mutex_, &absolute_time);
+#else
+  int rv = pthread_cond_timedwait(&condition_, user_mutex_, &absolute_time);
+#endif // OS_ANDROID && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
+#endif // OS_MACOSX
+
+  // On failure, we only expect the CV to timeout. Any other error value means
+  // that we've unexpectedly woken up.
+  DCHECK(rv == 0 || rv == ETIMEDOUT);
+#if DCHECK_IS_ON()
+  user_lock_->CheckUnheldAndMark();  // Mutex is reacquired on return; restore shadow ownership.
+#endif
+}
+
+void ConditionVariable::Broadcast() {
+  int rv = pthread_cond_broadcast(&condition_);  // Wakes every waiter (see usage note 2 in the header).
+  DCHECK_EQ(0, rv);
+}
+
+void ConditionVariable::Signal() {
+  int rv = pthread_cond_signal(&condition_);  // Wakes a single waiter.
+  DCHECK_EQ(0, rv);
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/synchronization/lock.cc b/security/sandbox/chromium/base/synchronization/lock.cc
new file mode 100644
index 0000000000..03297ada52
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/lock.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is used for debugging assertion support. The Lock class
+// is functionally a wrapper around the LockImpl class, so the only
+// real intelligence in the class is in the debugging logic.
+
+#include "base/synchronization/lock.h"
+
+#if DCHECK_IS_ON()
+
+namespace base {
+
+Lock::Lock() : lock_() {
+}
+
+Lock::~Lock() {
+  DCHECK(owning_thread_ref_.is_null());  // Must not be destroyed while held.
+}
+
+void Lock::AssertAcquired() const {
+  DCHECK(owning_thread_ref_ == PlatformThread::CurrentRef());
+}
+
+void Lock::CheckHeldAndUnmark() {
+  DCHECK(owning_thread_ref_ == PlatformThread::CurrentRef());
+  owning_thread_ref_ = PlatformThreadRef();  // Null ref == unowned.
+}
+
+void Lock::CheckUnheldAndMark() {
+  DCHECK(owning_thread_ref_.is_null());  // Fires on recursive acquisition.
+  owning_thread_ref_ = PlatformThread::CurrentRef();
+}
+
+} // namespace base
+
+#endif // DCHECK_IS_ON()
diff --git a/security/sandbox/chromium/base/synchronization/lock.h b/security/sandbox/chromium/base/synchronization/lock.h
new file mode 100644
index 0000000000..00095ab3af
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/lock.h
@@ -0,0 +1,133 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_LOCK_H_
+#define BASE_SYNCHRONIZATION_LOCK_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/lock_impl.h"
+#include "base/thread_annotations.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// A convenient wrapper for an OS specific critical section. The only real
+// intelligence in this class is in debug mode for the support for the
+// AssertAcquired() method.
+class LOCKABLE BASE_EXPORT Lock {
+ public:
+#if !DCHECK_IS_ON()
+  // Optimized wrapper implementation
+  Lock() : lock_() {}
+  ~Lock() {}
+
+  // TODO(lukasza): https://crbug.com/831825: Add EXCLUSIVE_LOCK_FUNCTION
+  // annotation to Acquire method and similar annotations to Release and Try
+  // methods (here and in the #else branch).
+  void Acquire() { lock_.Lock(); }
+  void Release() { lock_.Unlock(); }
+
+  // If the lock is not held, take it and return true. If the lock is already
+  // held by another thread, immediately return false. This must not be called
+  // by a thread already holding the lock (what happens is undefined and an
+  // assertion may fail).
+  bool Try() { return lock_.Try(); }
+
+  // Null implementation if not debug.
+  void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
+#else
+  Lock();
+  ~Lock();
+
+  // NOTE: We do not permit recursive locks and will commonly fire a DCHECK() if
+  // a thread attempts to acquire the lock a second time (while already holding
+  // it).
+  void Acquire() {
+    lock_.Lock();
+    CheckUnheldAndMark();  // Record ownership after taking the lock.
+  }
+  void Release() {
+    CheckHeldAndUnmark();  // Clear ownership before releasing.
+    lock_.Unlock();
+  }
+
+  bool Try() {
+    bool rv = lock_.Try();
+    if (rv) {
+      CheckUnheldAndMark();  // Only mark ownership when acquisition succeeded.
+    }
+    return rv;
+  }
+
+  void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK();
+#endif // DCHECK_IS_ON()
+
+  // Whether Lock mitigates priority inversion when used from different thread
+  // priorities.
+  static bool HandlesMultipleThreadPriorities() {
+#if defined(OS_WIN)
+    // Windows mitigates priority inversion by randomly boosting the priority of
+    // ready threads.
+    // https://msdn.microsoft.com/library/windows/desktop/ms684831.aspx
+    return true;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    // POSIX mitigates priority inversion by setting the priority of a thread
+    // holding a Lock to the maximum priority of any other thread waiting on it.
+    return internal::LockImpl::PriorityInheritanceAvailable();
+#else
+#error Unsupported platform
+#endif
+  }
+
+  // Both Windows and POSIX implementations of ConditionVariable need to be
+  // able to see our lock and tweak our debugging counters, as they release and
+  // acquire locks inside of their condition variable APIs.
+  friend class ConditionVariable;
+
+ private:
+#if DCHECK_IS_ON()
+  // Members and routines taking care of lock assertions.
+  // Note that this checks for recursive locks and allows them
+  // if the variable is set. This is allowed by the underlying implementation
+  // on Windows but not on Posix, so we're doing unneeded checks on Posix.
+  // It's worth it to share the code.
+  void CheckHeldAndUnmark();
+  void CheckUnheldAndMark();
+
+  // All private data is implicitly protected by lock_.
+  // Be VERY careful to only access members under that lock.
+  base::PlatformThreadRef owning_thread_ref_;
+#endif // DCHECK_IS_ON()
+
+  // Platform specific underlying lock implementation.
+  internal::LockImpl lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(Lock);
+};
+
+// A helper class that acquires the given Lock while the AutoLock is in scope.
+using AutoLock = internal::BasicAutoLock<Lock>;
+
+// AutoUnlock is a helper that will Release() the |lock| argument in the
+// constructor, and re-Acquire() it in the destructor.
+using AutoUnlock = internal::BasicAutoUnlock<Lock>;
+
+// Like AutoLock but is a no-op when the provided Lock* is null. Inspired from
+// absl::MutexLockMaybe. Use this instead of base::Optional<base::AutoLock> to
+// get around -Wthread-safety-analysis warnings for conditional locking.
+using AutoLockMaybe = internal::BasicAutoLockMaybe<Lock>;
+
+// Like AutoLock but permits Release() of its mutex before destruction.
+// Release() may be called at most once. Inspired from
+// absl::ReleasableMutexLock. Use this instead of base::Optional<base::AutoLock>
+// to get around -Wthread-safety-analysis warnings for AutoLocks that are
+// explicitly released early (prefer proper scoping to this).
+using ReleasableAutoLock = internal::BasicReleasableAutoLock<Lock>;
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_LOCK_H_
diff --git a/security/sandbox/chromium/base/synchronization/lock_impl.h b/security/sandbox/chromium/base/synchronization/lock_impl.h
new file mode 100644
index 0000000000..830b878e8e
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/lock_impl.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_LOCK_IMPL_H_
+#define BASE_SYNCHRONIZATION_LOCK_IMPL_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/thread_annotations.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <errno.h>
+#include <pthread.h>
+#endif
+
+namespace base {
+namespace internal {
+
+// This class implements the underlying platform-specific lock mechanism
+// (an SRWLOCK on Windows, a pthread mutex on POSIX/Fuchsia -- not a
+// spin-lock) used for the Lock class. Most users should not use LockImpl
+// directly, but should instead use Lock.
+class BASE_EXPORT LockImpl {
+ public:
+#if defined(OS_WIN)
+ using NativeHandle = CHROME_SRWLOCK;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ using NativeHandle = pthread_mutex_t;
+#endif
+
+ LockImpl();
+ ~LockImpl();
+
+ // If the lock is not held, take it and return true. If the lock is already
+ // held by something else, immediately return false.
+ bool Try();
+
+ // Take the lock, blocking until it is available if necessary.
+ void Lock();
+
+ // Release the lock. This must only be called by the lock's holder: after
+ // a successful call to Try, or a call to Lock.
+ inline void Unlock();
+
+ // Return the native underlying lock.
+ // TODO(awalker): refactor lock and condition variables so that this is
+ // unnecessary.
+ NativeHandle* native_handle() { return &native_handle_; }
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+ // Whether this lock will attempt to use priority inheritance.
+ static bool PriorityInheritanceAvailable();
+#endif
+
+ private:
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(LockImpl);
+};
+
+#if defined(OS_WIN)
+void LockImpl::Unlock() {
+ ::ReleaseSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&native_handle_));
+}
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+void LockImpl::Unlock() {
+ int rv = pthread_mutex_unlock(&native_handle_);
+ DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+}
+#endif
+
+// This is an implementation used for AutoLock templated on the lock type.
+template <class LockType>
+class SCOPED_LOCKABLE BasicAutoLock {
+ public:
+ struct AlreadyAcquired {};
+
+ explicit BasicAutoLock(LockType& lock) EXCLUSIVE_LOCK_FUNCTION(lock)
+ : lock_(lock) {
+ lock_.Acquire();
+ }
+
+ BasicAutoLock(LockType& lock, const AlreadyAcquired&)
+ EXCLUSIVE_LOCKS_REQUIRED(lock)
+ : lock_(lock) {
+ lock_.AssertAcquired();
+ }
+
+ ~BasicAutoLock() UNLOCK_FUNCTION() {
+ lock_.AssertAcquired();
+ lock_.Release();
+ }
+
+ private:
+ LockType& lock_;
+ DISALLOW_COPY_AND_ASSIGN(BasicAutoLock);
+};
+
+// This is an implementation used for AutoUnlock templated on the lock type.
+template <class LockType>
+class BasicAutoUnlock {
+ public:
+ explicit BasicAutoUnlock(LockType& lock) : lock_(lock) {
+ // We require our caller to have the lock.
+ lock_.AssertAcquired();
+ lock_.Release();
+ }
+
+ ~BasicAutoUnlock() { lock_.Acquire(); }
+
+ private:
+ LockType& lock_;
+ DISALLOW_COPY_AND_ASSIGN(BasicAutoUnlock);
+};
+
+// This is an implementation used for AutoLockMaybe templated on the lock type.
+template <class LockType>
+class SCOPED_LOCKABLE BasicAutoLockMaybe {
+ public:
+ explicit BasicAutoLockMaybe(LockType* lock) EXCLUSIVE_LOCK_FUNCTION(lock)
+ : lock_(lock) {
+ if (lock_)
+ lock_->Acquire();
+ }
+
+ ~BasicAutoLockMaybe() UNLOCK_FUNCTION() {
+ if (lock_) {
+ lock_->AssertAcquired();
+ lock_->Release();
+ }
+ }
+
+ private:
+ LockType* const lock_;
+ DISALLOW_COPY_AND_ASSIGN(BasicAutoLockMaybe);
+};
+
+// This is an implementation used for ReleasableAutoLock templated on the lock
+// type.
+template <class LockType>
+class SCOPED_LOCKABLE BasicReleasableAutoLock {
+ public:
+ explicit BasicReleasableAutoLock(LockType* lock) EXCLUSIVE_LOCK_FUNCTION(lock)
+ : lock_(lock) {
+ DCHECK(lock_);
+ lock_->Acquire();
+ }
+
+ ~BasicReleasableAutoLock() UNLOCK_FUNCTION() {
+ if (lock_) {
+ lock_->AssertAcquired();
+ lock_->Release();
+ }
+ }
+
+ void Release() UNLOCK_FUNCTION() {
+ DCHECK(lock_);
+ lock_->AssertAcquired();
+ lock_->Release();
+ lock_ = nullptr;
+ }
+
+ private:
+ LockType* lock_;
+ DISALLOW_COPY_AND_ASSIGN(BasicReleasableAutoLock);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_LOCK_IMPL_H_
diff --git a/security/sandbox/chromium/base/synchronization/lock_impl_posix.cc b/security/sandbox/chromium/base/synchronization/lock_impl_posix.cc
new file mode 100644
index 0000000000..7571f68a9a
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/lock_impl_posix.cc
@@ -0,0 +1,133 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/lock_impl.h"
+
+#include <string>
+
+#include "base/debug/activity_tracker.h"
+#include "base/logging.h"
+#include "base/posix/safe_strerror.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/synchronization_buildflags.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+#if DCHECK_IS_ON()
+const char* AdditionalHintForSystemErrorCode(int error_code) {
+ switch (error_code) {
+ case EINVAL:
+ return "Hint: This is often related to a use-after-free.";
+ default:
+ return "";
+ }
+}
+#endif // DCHECK_IS_ON()
+
+std::string SystemErrorCodeToString(int error_code) {
+#if DCHECK_IS_ON()
+ return base::safe_strerror(error_code) + ". " +
+ AdditionalHintForSystemErrorCode(error_code);
+#else // DCHECK_IS_ON()
+ return std::string();
+#endif // DCHECK_IS_ON()
+}
+
+} // namespace
+
+// Determines which platforms can consider using priority inheritance locks. Use
+// this define for platform code that may not compile if priority inheritance
+// locks aren't available. For this platform code,
+// PRIORITY_INHERITANCE_LOCKS_POSSIBLE() is a necessary but insufficient check.
+// Lock::PriorityInheritanceAvailable still must be checked as the code may
+// compile but the underlying platform still may not correctly support priority
+// inheritance locks.
+#if defined(OS_NACL) || defined(OS_ANDROID) || defined(OS_FUCHSIA)
+#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 0
+#else
+#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 1
+#endif
+
+LockImpl::LockImpl() {
+ pthread_mutexattr_t mta;
+ int rv = pthread_mutexattr_init(&mta);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+#if PRIORITY_INHERITANCE_LOCKS_POSSIBLE()
+ if (PriorityInheritanceAvailable()) {
+ rv = pthread_mutexattr_setprotocol(&mta, PTHREAD_PRIO_INHERIT);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+ }
+#endif
+#ifndef NDEBUG
+ // In debug, setup attributes for lock error checking.
+ rv = pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_ERRORCHECK);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+#endif
+ rv = pthread_mutex_init(&native_handle_, &mta);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+ rv = pthread_mutexattr_destroy(&mta);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+}
+
+LockImpl::~LockImpl() {
+ int rv = pthread_mutex_destroy(&native_handle_);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+}
+
+bool LockImpl::Try() {
+ int rv = pthread_mutex_trylock(&native_handle_);
+ DCHECK(rv == 0 || rv == EBUSY) << ". " << SystemErrorCodeToString(rv);
+ return rv == 0;
+}
+
+void LockImpl::Lock() {
+ // The ScopedLockAcquireActivity below is relatively expensive and so its
+ // actions can become significant due to the very large number of locks
+ // that tend to be used throughout the build. To avoid this cost in the
+ // vast majority of the calls, simply "try" the lock first and only do the
+ // (tracked) blocking call if that fails. Since "try" itself is a system
+ // call, and thus also somewhat expensive, don't bother with it unless
+ // tracking is actually enabled.
+ if (base::debug::GlobalActivityTracker::IsEnabled())
+ if (Try())
+ return;
+
+ base::debug::ScopedLockAcquireActivity lock_activity(this);
+ int rv = pthread_mutex_lock(&native_handle_);
+ DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+}
+
+// static
+bool LockImpl::PriorityInheritanceAvailable() {
+#if BUILDFLAG(ENABLE_MUTEX_PRIORITY_INHERITANCE)
+ return true;
+#elif PRIORITY_INHERITANCE_LOCKS_POSSIBLE() && defined(OS_MACOSX)
+ return true;
+#else
+ // Security concerns prevent the use of priority inheritance mutexes on Linux.
+ // * CVE-2010-0622 - Linux < 2.6.33-rc7, wake_futex_pi possible DoS.
+ // https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0622
+ // * CVE-2012-6647 - Linux < 3.5.1, futex_wait_requeue_pi possible DoS.
+ // https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-6647
+ // * CVE-2014-3153 - Linux <= 3.14.5, futex_requeue, privilege escalation.
+ // https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-3153
+ //
+ // If the above were all addressed, we still need a runtime check to deal with
+ // the bug below.
+ // * glibc Bug 14652: https://sourceware.org/bugzilla/show_bug.cgi?id=14652
+ // Fixed in glibc 2.17.
+ // Priority inheritance mutexes may deadlock with condition variables
+ // during reacquisition of the mutex after the condition variable is
+ // signalled.
+ return false;
+#endif
+}
+
+} // namespace internal
+} // namespace base
diff --git a/security/sandbox/chromium/base/synchronization/lock_impl_win.cc b/security/sandbox/chromium/base/synchronization/lock_impl_win.cc
new file mode 100644
index 0000000000..e0c4e9d7fc
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/lock_impl_win.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/lock_impl.h"
+
+#include "base/debug/activity_tracker.h"
+
+#include <windows.h>
+
+namespace base {
+namespace internal {
+
+LockImpl::LockImpl() : native_handle_(SRWLOCK_INIT) {}
+
+LockImpl::~LockImpl() = default;
+
+bool LockImpl::Try() {
+ return !!::TryAcquireSRWLockExclusive(
+ reinterpret_cast<PSRWLOCK>(&native_handle_));
+}
+
+void LockImpl::Lock() {
+ // The ScopedLockAcquireActivity below is relatively expensive and so its
+ // actions can become significant due to the very large number of locks
+ // that tend to be used throughout the build. To avoid this cost in the
+ // vast majority of the calls, simply "try" the lock first and only do the
+ // (tracked) blocking call if that fails. Since "try" itself is a system
+ // call, and thus also somewhat expensive, don't bother with it unless
+ // tracking is actually enabled.
+ if (base::debug::GlobalActivityTracker::IsEnabled())
+ if (Try())
+ return;
+
+ base::debug::ScopedLockAcquireActivity lock_activity(this);
+ ::AcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&native_handle_));
+}
+
+} // namespace internal
+} // namespace base
diff --git a/security/sandbox/chromium/base/synchronization/waitable_event.h b/security/sandbox/chromium/base/synchronization/waitable_event.h
new file mode 100644
index 0000000000..8f78084e0d
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/waitable_event.h
@@ -0,0 +1,291 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
+#define BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#elif defined(OS_MACOSX)
+#include <mach/mach.h>
+
+#include <list>
+#include <memory>
+
+#include "base/callback_forward.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <list>
+#include <utility>
+
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#endif
+
+namespace base {
+
+class TimeDelta;
+
+// A WaitableEvent can be a useful thread synchronization tool when you want to
+// allow one thread to wait for another thread to finish some work. For
+// non-Windows systems, this can only be used from within a single address
+// space.
+//
+// Use a WaitableEvent when you would otherwise use a Lock+ConditionVariable to
+// protect a simple boolean value. However, if you find yourself using a
+// WaitableEvent in conjunction with a Lock to wait for a more complex state
+// change (e.g., for an item to be added to a queue), then you should probably
+// be using a ConditionVariable instead of a WaitableEvent.
+//
+// NOTE: On Windows, this class provides a subset of the functionality afforded
+// by a Windows event object. This is intentional. If you are writing
+// Windows-specific code and you need other features of a Windows event, then
+// you might be better off just using a Windows event directly.
+class BASE_EXPORT WaitableEvent {
+ public:
+ // Indicates whether a WaitableEvent should automatically reset the event
+ // state after a single waiting thread has been released or remain signaled
+ // until Reset() is manually invoked.
+ enum class ResetPolicy { MANUAL, AUTOMATIC };
+
+ // Indicates whether a new WaitableEvent should start in a signaled state or
+ // not.
+ enum class InitialState { SIGNALED, NOT_SIGNALED };
+
+ // Constructs a WaitableEvent with policy and initial state as detailed in
+ // the above enums.
+ WaitableEvent(ResetPolicy reset_policy = ResetPolicy::MANUAL,
+ InitialState initial_state = InitialState::NOT_SIGNALED);
+
+#if defined(OS_WIN)
+ // Create a WaitableEvent from an Event HANDLE which has already been
+ // created. This objects takes ownership of the HANDLE and will close it when
+ // deleted.
+ explicit WaitableEvent(win::ScopedHandle event_handle);
+#endif
+
+ ~WaitableEvent();
+
+ // Put the event in the un-signaled state.
+ void Reset();
+
+ // Put the event in the signaled state. Causing any thread blocked on Wait
+ // to be woken up.
+ void Signal();
+
+ // Returns true if the event is in the signaled state, else false. If this
+ // is not a manual reset event, then this test will cause a reset.
+ bool IsSignaled();
+
+ // Wait indefinitely for the event to be signaled. Wait's return "happens
+ // after" |Signal| has completed. This means that it's safe for a
+ // WaitableEvent to synchronise its own destruction, like this:
+ //
+ // WaitableEvent *e = new WaitableEvent;
+ // SendToOtherThread(e);
+ // e->Wait();
+ // delete e;
+ void Wait();
+
+ // Wait up until wait_delta has passed for the event to be signaled
+ // (real-time; ignores time overrides). Returns true if the event was
+ // signaled. Handles spurious wakeups and guarantees that |wait_delta| will
+ // have elapsed if this returns false.
+ //
+ // TimedWait can synchronise its own destruction like |Wait|.
+ bool TimedWait(const TimeDelta& wait_delta);
+
+#if defined(OS_WIN)
+ HANDLE handle() const { return handle_.Get(); }
+#endif
+
+ // Declares that this WaitableEvent will only ever be used by a thread that is
+ // idle at the bottom of its stack and waiting for work (in particular, it is
+ // not synchronously waiting on this event before resuming ongoing work). This
+ // is useful to avoid telling base-internals that this thread is "blocked"
+ // when it's merely idle and ready to do work. As such, this is only expected
+ // to be used by thread and thread pool impls.
+ void declare_only_used_while_idle() { waiting_is_blocking_ = false; }
+
+ // Wait, synchronously, on multiple events.
+ // waitables: an array of WaitableEvent pointers
+ // count: the number of elements in @waitables
+ //
+ // returns: the index of a WaitableEvent which has been signaled.
+ //
+ // You MUST NOT delete any of the WaitableEvent objects while this wait is
+ // happening, however WaitMany's return "happens after" the |Signal| call
+ // that caused it has completed, like |Wait|.
+ //
+ // If more than one WaitableEvent is signaled to unblock WaitMany, the lowest
+ // index among them is returned.
+ static size_t WaitMany(WaitableEvent** waitables, size_t count);
+
+ // For asynchronous waiting, see WaitableEventWatcher
+
+ // This is a private helper class. It's here because it's used by friends of
+ // this class (such as WaitableEventWatcher) to be able to enqueue elements
+ // of the wait-list
+ class Waiter {
+ public:
+ // Signal the waiter to wake up.
+ //
+ // Consider the case of a Waiter which is in multiple WaitableEvent's
+ // wait-lists. Each WaitableEvent is automatic-reset and two of them are
+ // signaled at the same time. Now, each will wake only the first waiter in
+ // the wake-list before resetting. However, if those two waiters happen to
+ // be the same object (as can happen if another thread didn't have a chance
+ // to dequeue the waiter from the other wait-list in time), two auto-resets
+ // will have happened, but only one waiter has been signaled!
+ //
+ // Because of this, a Waiter may "reject" a wake by returning false. In
+ // this case, the auto-reset WaitableEvent shouldn't act as if anything has
+ // been notified.
+ virtual bool Fire(WaitableEvent* signaling_event) = 0;
+
+ // Waiters may implement this in order to provide an extra condition for
+ // two Waiters to be considered equal. In WaitableEvent::Dequeue, if the
+ // pointers match then this function is called as a final check. See the
+ // comments in ~Handle for why.
+ virtual bool Compare(void* tag) = 0;
+
+ protected:
+ virtual ~Waiter() = default;
+ };
+
+ private:
+ friend class WaitableEventWatcher;
+
+#if defined(OS_WIN)
+ win::ScopedHandle handle_;
+#elif defined(OS_MACOSX)
+ // Prior to macOS 10.12, a TYPE_MACH_RECV dispatch source may not be invoked
+ // immediately. If a WaitableEventWatcher is used on a manual-reset event,
+ // and another thread that is Wait()ing on the event calls Reset()
+ // immediately after waking up, the watcher may not receive the callback.
+ // On macOS 10.12 and higher, dispatch delivery is reliable. But for OSes
+ // prior, a lock-protected list of callbacks is used for manual-reset event
+ // watchers. Automatic-reset events are not prone to this issue, since the
+ // first thread to wake will claim the event.
+ static bool UseSlowWatchList(ResetPolicy policy);
+
+ // Peeks the message queue named by |port| and returns true if a message
+ // is present and false if not. If |dequeue| is true, the messsage will be
+ // drained from the queue. If |dequeue| is false, the queue will only be
+ // peeked. |port| must be a receive right.
+ static bool PeekPort(mach_port_t port, bool dequeue);
+
+ // The Mach receive right is waited on by both WaitableEvent and
+ // WaitableEventWatcher. It is valid to signal and then delete an event, and
+ // a watcher should still be notified. If the right were to be destroyed
+ // immediately, the watcher would not receive the signal. Because Mach
+ // receive rights cannot have a user refcount greater than one, the right
+ // must be reference-counted manually.
+ class ReceiveRight : public RefCountedThreadSafe<ReceiveRight> {
+ public:
+ ReceiveRight(mach_port_t name, bool create_slow_watch_list);
+
+ mach_port_t Name() const { return right_.get(); }
+
+ // This structure is used iff UseSlowWatchList() is true. See the comment
+ // in Signal() for details.
+ struct WatchList {
+ WatchList();
+ ~WatchList();
+
+ // The lock protects a list of closures to be run when the event is
+ // Signal()ed. The closures are invoked on the signaling thread, so they
+ // must be safe to be called from any thread.
+ Lock lock;
+ std::list<OnceClosure> list;
+ };
+
+ WatchList* SlowWatchList() const { return slow_watch_list_.get(); }
+
+ private:
+ friend class RefCountedThreadSafe<ReceiveRight>;
+ ~ReceiveRight();
+
+ mac::ScopedMachReceiveRight right_;
+
+ // This is allocated iff UseSlowWatchList() is true. It is created on the
+ // heap to avoid performing initialization when not using the slow path.
+ std::unique_ptr<WatchList> slow_watch_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReceiveRight);
+ };
+
+ const ResetPolicy policy_;
+
+ // The receive right for the event.
+ scoped_refptr<ReceiveRight> receive_right_;
+
+ // The send right used to signal the event. This can be disposed of with
+ // the event, unlike the receive right, since a deleted event cannot be
+ // signaled.
+ mac::ScopedMachSendRight send_right_;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ // On Windows, you must not close a HANDLE which is currently being waited on.
+ // The MSDN documentation says that the resulting behaviour is 'undefined'.
+ // To solve that issue each WaitableEventWatcher duplicates the given event
+ // handle.
+
+ // However, if we were to include the following members
+ // directly then, on POSIX, one couldn't use WaitableEventWatcher to watch an
+ // event which gets deleted. This mismatch has bitten us several times now,
+ // so we have a kernel of the WaitableEvent, which is reference counted.
+ // WaitableEventWatchers may then take a reference and thus match the Windows
+ // behaviour.
+ struct WaitableEventKernel :
+ public RefCountedThreadSafe<WaitableEventKernel> {
+ public:
+ WaitableEventKernel(ResetPolicy reset_policy, InitialState initial_state);
+
+ bool Dequeue(Waiter* waiter, void* tag);
+
+ base::Lock lock_;
+ const bool manual_reset_;
+ bool signaled_;
+ std::list<Waiter*> waiters_;
+
+ private:
+ friend class RefCountedThreadSafe<WaitableEventKernel>;
+ ~WaitableEventKernel();
+ };
+
+ typedef std::pair<WaitableEvent*, size_t> WaiterAndIndex;
+
+ // When dealing with arrays of WaitableEvent*, we want to sort by the address
+ // of the WaitableEvent in order to have a globally consistent locking order.
+ // In that case we keep them, in sorted order, in an array of pairs where the
+ // second element is the index of the WaitableEvent in the original,
+ // unsorted, array.
+ static size_t EnqueueMany(WaiterAndIndex* waitables,
+ size_t count, Waiter* waiter);
+
+ bool SignalAll();
+ bool SignalOne();
+ void Enqueue(Waiter* waiter);
+
+ scoped_refptr<WaitableEventKernel> kernel_;
+#endif
+
+ // Whether a thread invoking Wait() on this WaitableEvent should be considered
+ // blocked as opposed to idle (and potentially replaced if part of a pool).
+ bool waiting_is_blocking_ = true;
+
+ DISALLOW_COPY_AND_ASSIGN(WaitableEvent);
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
diff --git a/security/sandbox/chromium/base/synchronization/waitable_event_posix.cc b/security/sandbox/chromium/base/synchronization/waitable_event_posix.cc
new file mode 100644
index 0000000000..effa899191
--- /dev/null
+++ b/security/sandbox/chromium/base/synchronization/waitable_event_posix.cc
@@ -0,0 +1,445 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "base/debug/activity_tracker.h"
+#include "base/logging.h"
+#include "base/optional.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "base/time/time_override.h"
+
+// -----------------------------------------------------------------------------
+// A WaitableEvent on POSIX is implemented as a wait-list. Currently we don't
+// support cross-process events (where one process can signal an event which
+// others are waiting on). Because of this, we can avoid having one thread per
+// listener in several cases.
+//
+// The WaitableEvent maintains a list of waiters, protected by a lock. Each
+// waiter is either an async wait, in which case we have a Task and the
+// MessageLoop to run it on, or a blocking wait, in which case we have the
+// condition variable to signal.
+//
+// Waiting involves grabbing the lock and adding oneself to the wait list. Async
+// waits can be canceled, which means grabbing the lock and removing oneself
+// from the list.
+//
+// Waiting on multiple events is handled by adding a single, synchronous wait to
+// the wait-list of many events. An event passes a pointer to itself when
+// firing a waiter and so we can store that pointer to find out which event
+// triggered.
+// -----------------------------------------------------------------------------
+
+namespace base {
+
+// -----------------------------------------------------------------------------
+// WaitableEvent implementation. (The abstract base class for waking the two
+// types of waiters is WaitableEvent::Waiter, declared in the header.)
+// -----------------------------------------------------------------------------
+WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
+ InitialState initial_state)
+ : kernel_(new WaitableEventKernel(reset_policy, initial_state)) {}
+
+WaitableEvent::~WaitableEvent() = default;
+
+void WaitableEvent::Reset() {
+ base::AutoLock locked(kernel_->lock_);
+ kernel_->signaled_ = false;
+}
+
+void WaitableEvent::Signal() {
+ base::AutoLock locked(kernel_->lock_);
+
+ if (kernel_->signaled_)
+ return;
+
+ if (kernel_->manual_reset_) {
+ SignalAll();
+ kernel_->signaled_ = true;
+ } else {
+ // In the case of auto reset, if no waiters were woken, we remain
+ // signaled.
+ if (!SignalOne())
+ kernel_->signaled_ = true;
+ }
+}
+
+bool WaitableEvent::IsSignaled() {
+ base::AutoLock locked(kernel_->lock_);
+
+ const bool result = kernel_->signaled_;
+ if (result && !kernel_->manual_reset_)
+ kernel_->signaled_ = false;
+ return result;
+}
+
+// -----------------------------------------------------------------------------
+// Synchronous waits
+
+// -----------------------------------------------------------------------------
+// This is a synchronous waiter. The thread is waiting on the given condition
+// variable and the fired flag in this object.
+// -----------------------------------------------------------------------------
+class SyncWaiter : public WaitableEvent::Waiter {
+ public:
+ SyncWaiter()
+ : fired_(false), signaling_event_(nullptr), lock_(), cv_(&lock_) {}
+
+ bool Fire(WaitableEvent* signaling_event) override {
+ base::AutoLock locked(lock_);
+
+ if (fired_)
+ return false;
+
+ fired_ = true;
+ signaling_event_ = signaling_event;
+
+ cv_.Broadcast();
+
+ // Unlike AsyncWaiter objects, SyncWaiter objects are stack-allocated on
+ // the blocking thread's stack. There is no |delete this;| in Fire. The
+ // SyncWaiter object is destroyed when it goes out of scope.
+
+ return true;
+ }
+
+ WaitableEvent* signaling_event() const {
+ return signaling_event_;
+ }
+
+ // ---------------------------------------------------------------------------
+ // These waiters are always stack allocated and don't delete themselves. Thus
+ // there's no problem and the ABA tag is the same as the object pointer.
+ // ---------------------------------------------------------------------------
+ bool Compare(void* tag) override { return this == tag; }
+
+ // ---------------------------------------------------------------------------
+ // Called with lock held.
+ // ---------------------------------------------------------------------------
+ bool fired() const {
+ return fired_;
+ }
+
+ // ---------------------------------------------------------------------------
+ // During a TimedWait, we need a way to make sure that an auto-reset
+ // WaitableEvent doesn't think that this event has been signaled between
+ // unlocking it and removing it from the wait-list. Called with lock held.
+ // ---------------------------------------------------------------------------
+ void Disable() {
+ fired_ = true;
+ }
+
+ base::Lock* lock() {
+ return &lock_;
+ }
+
+ base::ConditionVariable* cv() {
+ return &cv_;
+ }
+
+ private:
+ bool fired_;
+ WaitableEvent* signaling_event_; // The WaitableEvent which woke us
+ base::Lock lock_;
+ base::ConditionVariable cv_;
+};
+
+void WaitableEvent::Wait() {
+ bool result = TimedWait(TimeDelta::Max());
+ DCHECK(result) << "TimedWait() should never fail with infinite timeout";
+}
+
+bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
+ if (wait_delta <= TimeDelta())
+ return IsSignaled();
+
+ // Record the event that this thread is blocking upon (for hang diagnosis) and
+ // consider it blocked for scheduling purposes. Ignore this for non-blocking
+ // WaitableEvents.
+ Optional<debug::ScopedEventWaitActivity> event_activity;
+ Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
+ scoped_blocking_call;
+ if (waiting_is_blocking_) {
+ event_activity.emplace(this);
+ scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
+ }
+
+ kernel_->lock_.Acquire();
+ if (kernel_->signaled_) {
+ if (!kernel_->manual_reset_) {
+ // In this case we were signaled when we had no waiters. Now that
+ // someone has waited upon us, we can automatically reset.
+ kernel_->signaled_ = false;
+ }
+
+ kernel_->lock_.Release();
+ return true;
+ }
+
+ SyncWaiter sw;
+ if (!waiting_is_blocking_)
+ sw.cv()->declare_only_used_while_idle();
+ sw.lock()->Acquire();
+
+ Enqueue(&sw);
+ kernel_->lock_.Release();
+ // We are violating locking order here by holding the SyncWaiter lock but not
+ // the WaitableEvent lock. However, this is safe because we don't lock |lock_|
+ // again before unlocking it.
+
+ // TimeTicks takes care of overflow but we special case is_max() nonetheless
+ // to avoid invoking TimeTicksNowIgnoringOverride() unnecessarily (same for
+ // the increment step of the for loop if the condition variable returns
+ // early). Ref: https://crbug.com/910524#c7
+ const TimeTicks end_time =
+ wait_delta.is_max() ? TimeTicks::Max()
+ : subtle::TimeTicksNowIgnoringOverride() + wait_delta;
+ for (TimeDelta remaining = wait_delta; remaining > TimeDelta() && !sw.fired();
+ remaining = end_time.is_max()
+ ? TimeDelta::Max()
+ : end_time - subtle::TimeTicksNowIgnoringOverride()) {
+ if (end_time.is_max())
+ sw.cv()->Wait();
+ else
+ sw.cv()->TimedWait(remaining);
+ }
+
+ // Get the SyncWaiter signaled state before releasing the lock.
+ const bool return_value = sw.fired();
+
+ // We can't acquire |lock_| before releasing the SyncWaiter lock (because of
+ // locking order), however, in between the two a signal could be fired and
+ // |sw| would accept it, however we will still return false, so the signal
+ // would be lost on an auto-reset WaitableEvent. Thus we call Disable which
+ // makes sw::Fire return false.
+ sw.Disable();
+ sw.lock()->Release();
+
+ // This is a bug that has been enshrined in the interface of WaitableEvent
+ // now: |Dequeue| is called even when |sw.fired()| is true, even though it'll
+ // always return false in that case. However, taking the lock ensures that
+ // |Signal| has completed before we return and means that a WaitableEvent can
+ // synchronise its own destruction.
+ kernel_->lock_.Acquire();
+ kernel_->Dequeue(&sw, &sw);
+ kernel_->lock_.Release();
+
+ return return_value;
+}
+
+// -----------------------------------------------------------------------------
+// Synchronous waiting on multiple objects.
+
+static bool // StrictWeakOrdering
+cmp_fst_addr(const std::pair<WaitableEvent*, unsigned> &a,
+ const std::pair<WaitableEvent*, unsigned> &b) {
+ return a.first < b.first;
+}
+
+// static
+// Blocks until one of |raw_waitables| becomes signaled and returns the index
+// (into the caller's original array) of the event that woke us. Deadlock is
+// avoided by always acquiring the per-event locks in ascending address order.
+size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
+                               size_t count) {
+  DCHECK(count) << "Cannot wait on no events";
+  internal::ScopedBlockingCallWithBaseSyncPrimitives scoped_blocking_call(
+      FROM_HERE, BlockingType::MAY_BLOCK);
+  // Record an event (the first) that this thread is blocking upon.
+  debug::ScopedEventWaitActivity event_activity(raw_waitables[0]);
+
+  // We need to acquire the locks in a globally consistent order. Thus we sort
+  // the array of waitables by address. We actually sort pairs so that we can
+  // map back to the original index values later.
+  std::vector<std::pair<WaitableEvent*, size_t> > waitables;
+  waitables.reserve(count);
+  for (size_t i = 0; i < count; ++i)
+    waitables.push_back(std::make_pair(raw_waitables[i], i));
+
+  DCHECK_EQ(count, waitables.size());
+
+  sort(waitables.begin(), waitables.end(), cmp_fst_addr);
+
+  // The set of waitables must be distinct. Since we have just sorted by
+  // address, we can check this cheaply by comparing pairs of consecutive
+  // elements. (count >= 1 is checked above, so size() - 1 cannot underflow.)
+  for (size_t i = 0; i < waitables.size() - 1; ++i) {
+    DCHECK(waitables[i].first != waitables[i+1].first);
+  }
+
+  SyncWaiter sw;
+
+  // EnqueueMany either enqueues |sw| on every event (returning |count|, with
+  // all event locks held) or finds an already-signaled event and returns its
+  // position with no locks held.
+  const size_t r = EnqueueMany(&waitables[0], count, &sw);
+  if (r < count) {
+    // One of the events is already signaled. The SyncWaiter has not been
+    // enqueued anywhere.
+    return waitables[r].second;
+  }
+
+  // At this point, we hold the locks on all the WaitableEvents and we have
+  // enqueued our waiter in them all.
+  sw.lock()->Acquire();
+  // Release the WaitableEvent locks in the reverse order
+  for (size_t i = 0; i < count; ++i) {
+    waitables[count - (1 + i)].first->kernel_->lock_.Release();
+  }
+
+  // Block (under the SyncWaiter's own lock) until some event fires; there is
+  // no timed variant of WaitMany.
+  for (;;) {
+    if (sw.fired())
+      break;
+
+    sw.cv()->Wait();
+  }
+  sw.lock()->Release();
+
+  // The address of the WaitableEvent which fired is stored in the SyncWaiter.
+  WaitableEvent *const signaled_event = sw.signaling_event();
+  // This will store the index of the raw_waitables which fired.
+  size_t signaled_index = 0;
+
+  // Take the locks of each WaitableEvent in turn (except the signaled one) and
+  // remove our SyncWaiter from the wait-list
+  for (size_t i = 0; i < count; ++i) {
+    if (raw_waitables[i] != signaled_event) {
+      raw_waitables[i]->kernel_->lock_.Acquire();
+      // There's no possible ABA issue with the address of the SyncWaiter here
+      // because it lives on the stack. Thus the tag value is just the pointer
+      // value again.
+      raw_waitables[i]->kernel_->Dequeue(&sw, &sw);
+      raw_waitables[i]->kernel_->lock_.Release();
+    } else {
+      // By taking this lock here we ensure that |Signal| has completed by the
+      // time we return, because |Signal| holds this lock. This matches the
+      // behaviour of |Wait| and |TimedWait|.
+      raw_waitables[i]->kernel_->lock_.Acquire();
+      raw_waitables[i]->kernel_->lock_.Release();
+      signaled_index = i;
+    }
+  }
+
+  return signaled_index;
+}
+
+// -----------------------------------------------------------------------------
+// If return value == count:
+// The locks of the WaitableEvents have been taken in order and the Waiter has
+// been enqueued in the wait-list of each. None of the WaitableEvents are
+// currently signaled
+// else:
+// None of the WaitableEvent locks are held. The Waiter has not been enqueued
+// in any of them and the return value is the index of the WaitableEvent which
+// was signaled with the lowest input index from the original WaitMany call.
+// -----------------------------------------------------------------------------
+// static
+// |waitables| must already be sorted by WaitableEvent address (see WaitMany),
+// so acquiring the locks in array order follows the global lock order.
+size_t WaitableEvent::EnqueueMany(std::pair<WaitableEvent*, size_t>* waitables,
+                                  size_t count,
+                                  Waiter* waiter) {
+  // |winner| tracks the lowest *original* index (|second|) among signaled
+  // events; |winner_index| is that event's position in the sorted array.
+  size_t winner = count;
+  size_t winner_index = count;
+  for (size_t i = 0; i < count; ++i) {
+    auto& kernel = waitables[i].first->kernel_;
+    // Acquire and keep every lock, even after a signaled event is found, so
+    // the unlock loop below is uniform.
+    kernel->lock_.Acquire();
+    if (kernel->signaled_ && waitables[i].second < winner) {
+      winner = waitables[i].second;
+      winner_index = i;
+    }
+  }
+
+  // No events signaled. All locks acquired. Enqueue the Waiter on all of them
+  // and return.
+  if (winner == count) {
+    for (size_t i = 0; i < count; ++i)
+      waitables[i].first->Enqueue(waiter);
+    return count;
+  }
+
+  // Unlock in reverse order and possibly clear the chosen winner's signal
+  // before returning its index.
+  for (auto* w = waitables + count - 1; w >= waitables; --w) {
+    auto& kernel = w->first->kernel_;
+    if (w->second == winner) {
+      // An auto-reset event is consumed by this wait.
+      if (!kernel->manual_reset_)
+        kernel->signaled_ = false;
+    }
+    kernel->lock_.Release();
+  }
+
+  return winner_index;
+}
+
+// -----------------------------------------------------------------------------
+
+
+// -----------------------------------------------------------------------------
+// Private functions...
+
+// Translates the public enum-based construction parameters into the two
+// booleans the kernel stores: whether the event auto-resets on wake and
+// whether it starts out signaled.
+WaitableEvent::WaitableEventKernel::WaitableEventKernel(
+    ResetPolicy reset_policy,
+    InitialState initial_state)
+    : manual_reset_(reset_policy == ResetPolicy::MANUAL),
+      signaled_(initial_state == InitialState::SIGNALED) {}
+
+WaitableEvent::WaitableEventKernel::~WaitableEventKernel() = default;
+
+// -----------------------------------------------------------------------------
+// Wake all waiting waiters. Called with lock held.
+// -----------------------------------------------------------------------------
+bool WaitableEvent::SignalAll() {
+  bool signaled_at_least_one = false;
+
+  // Fire() may return false for a waiter that has already been disabled
+  // (e.g. a SyncWaiter that called Disable() after timing out); such waiters
+  // are still dropped from the list below.
+  for (auto* i : kernel_->waiters_) {
+    if (i->Fire(this))
+      signaled_at_least_one = true;
+  }
+
+  // Every waiter has been offered the signal, so the list empties.
+  kernel_->waiters_.clear();
+  return signaled_at_least_one;
+}
+
+// ---------------------------------------------------------------------------
+// Try to wake a single waiter. Return true if one was woken. Called with lock
+// held.
+// ---------------------------------------------------------------------------
+bool WaitableEvent::SignalOne() {
+  for (;;) {
+    if (kernel_->waiters_.empty())
+      return false;
+
+    // Offer the signal to the waiter at the head of the queue. Fire() can
+    // refuse (returning false) if the waiter was already disabled, in which
+    // case we discard it and try the next one.
+    const bool r = (*kernel_->waiters_.begin())->Fire(this);
+    kernel_->waiters_.pop_front();
+    if (r)
+      return true;
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Add a waiter to the list of those waiting. Called with lock held.
+// -----------------------------------------------------------------------------
+void WaitableEvent::Enqueue(Waiter* waiter) {
+  // |waiter| is not owned: callers keep it alive (typically on their stack)
+  // until it is fired or dequeued.
+  kernel_->waiters_.push_back(waiter);
+}
+
+// -----------------------------------------------------------------------------
+// Remove a waiter from the list of those waiting. Return true if the waiter was
+// actually removed. Called with lock held.
+// -----------------------------------------------------------------------------
+bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) {
+  // The extra Compare(tag) check guards against removing the wrong entry if
+  // a waiter address were ever recycled: only the registration that matches
+  // both pointer and tag is erased.
+  for (auto i = waiters_.begin(); i != waiters_.end(); ++i) {
+    if (*i == waiter && (*i)->Compare(tag)) {
+      waiters_.erase(i);
+      return true;
+    }
+  }
+
+  // Not found: the waiter was already removed (e.g. by SignalAll/SignalOne).
+  return false;
+}
+
+// -----------------------------------------------------------------------------
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/task_runner.h b/security/sandbox/chromium/base/task_runner.h
new file mode 100644
index 0000000000..8abf5ef4ba
--- /dev/null
+++ b/security/sandbox/chromium/base/task_runner.h
@@ -0,0 +1,136 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_RUNNER_H_
+#define BASE_TASK_RUNNER_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+
+namespace base {
+
+struct TaskRunnerTraits;
+
+// A TaskRunner is an object that runs posted tasks (in the form of
+// OnceClosure objects). The TaskRunner interface provides a way of
+// decoupling task posting from the mechanics of how each task will be
+// run. TaskRunner provides very weak guarantees as to how posted
+// tasks are run (or if they're run at all). In particular, it only
+// guarantees:
+//
+// - Posting a task will not run it synchronously. That is, no
+// Post*Task method will call task.Run() directly.
+//
+// - Increasing the delay can only delay when the task gets run.
+// That is, increasing the delay may not affect when the task gets
+// run, or it could make it run later than it normally would, but
+// it won't make it run earlier than it normally would.
+//
+// TaskRunner does not guarantee the order in which posted tasks are
+// run, whether tasks overlap, or whether they're run on a particular
+// thread. Also it does not guarantee a memory model for shared data
+// between tasks. (In other words, you should use your own
+// synchronization/locking primitives if you need to share data
+// between tasks.)
+//
+// Implementations of TaskRunner should be thread-safe in that all
+// methods must be safe to call on any thread. Ownership semantics
+// for TaskRunners are in general not clear, which is why the
+// interface itself is RefCountedThreadSafe.
+//
+// Some theoretical implementations of TaskRunner:
+//
+// - A TaskRunner that uses a thread pool to run posted tasks.
+//
+// - A TaskRunner that, for each task, spawns a non-joinable thread
+// to run that task and immediately quit.
+//
+// - A TaskRunner that stores the list of posted tasks and has a
+// method Run() that runs each runnable task in random order.
+class BASE_EXPORT TaskRunner
+    : public RefCountedThreadSafe<TaskRunner, TaskRunnerTraits> {
+ public:
+  // Posts the given task to be run. Returns true if the task may be
+  // run at some point in the future, and false if the task definitely
+  // will not be run.
+  //
+  // Equivalent to PostDelayedTask(from_here, task, 0).
+  bool PostTask(const Location& from_here, OnceClosure task);
+
+  // Like PostTask, but tries to run the posted task only after |delay|
+  // has passed. Implementations should use a tick clock, rather than wall-
+  // clock time, to implement |delay|.
+  virtual bool PostDelayedTask(const Location& from_here,
+                               OnceClosure task,
+                               base::TimeDelta delay) = 0;
+
+  // Posts |task| on the current TaskRunner. On completion, |reply|
+  // is posted to the thread that called PostTaskAndReply(). Both
+  // |task| and |reply| are guaranteed to be deleted on the thread
+  // from which PostTaskAndReply() is invoked. This allows objects
+  // that must be deleted on the originating thread to be bound into
+  // the |task| and |reply| OnceClosures. In particular, it can be useful
+  // to use WeakPtr<> in the |reply| OnceClosure so that the reply
+  // operation can be canceled. See the following pseudo-code:
+  //
+  // class DataBuffer : public RefCountedThreadSafe<DataBuffer> {
+  //  public:
+  //   // Called to add data into a buffer.
+  //   void AddData(void* buf, size_t length);
+  //   ...
+  // };
+  //
+  //
+  // class DataLoader : public SupportsWeakPtr<DataLoader> {
+  //  public:
+  //    void GetData() {
+  //      scoped_refptr<DataBuffer> buffer = new DataBuffer();
+  //      target_thread_.task_runner()->PostTaskAndReply(
+  //          FROM_HERE,
+  //          base::BindOnce(&DataBuffer::AddData, buffer),
+  //          base::BindOnce(&DataLoader::OnDataReceived, AsWeakPtr(), buffer));
+  //    }
+  //
+  //  private:
+  //    void OnDataReceived(scoped_refptr<DataBuffer> buffer) {
+  //      // Do something with buffer.
+  //    }
+  // };
+  //
+  //
+  // Things to notice:
+  //   * Results of |task| are shared with |reply| by binding a shared argument
+  //     (a DataBuffer instance).
+  //   * The DataLoader object has no special thread safety.
+  //   * The DataLoader object can be deleted while |task| is still running,
+  //     and the reply will cancel itself safely because it is bound to a
+  //     WeakPtr<>.
+  bool PostTaskAndReply(const Location& from_here,
+                        OnceClosure task,
+                        OnceClosure reply);
+
+ protected:
+  friend struct TaskRunnerTraits;
+
+  TaskRunner();
+  virtual ~TaskRunner();
+
+  // Called when this object should be destroyed. By default simply
+  // deletes |this|, but can be overridden to do something else, like
+  // delete on a certain thread. Reached via TaskRunnerTraits::Destruct when
+  // the last reference is released (hence the friend declaration above).
+  virtual void OnDestruct() const;
+};
+
+// Destruction trait for RefCountedThreadSafe: instead of plain |delete|, the
+// final release is routed through here so TaskRunner::OnDestruct() can decide
+// how (and on which thread) the object is destroyed.
+struct BASE_EXPORT TaskRunnerTraits {
+  static void Destruct(const TaskRunner* task_runner);
+};
+
+} // namespace base
+
+#endif // BASE_TASK_RUNNER_H_
diff --git a/security/sandbox/chromium/base/template_util.h b/security/sandbox/chromium/base/template_util.h
new file mode 100644
index 0000000000..51bd085279
--- /dev/null
+++ b/security/sandbox/chromium/base/template_util.h
@@ -0,0 +1,188 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEMPLATE_UTIL_H_
+#define BASE_TEMPLATE_UTIL_H_
+
+#include <stddef.h>
+#include <iosfwd>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "build/build_config.h"
+
+// Some versions of libstdc++ have partial support for type_traits, but misses
+// a smaller subset while removing some of the older non-standard stuff. Assume
+// that all versions below 5.0 fall in this category, along with one 5.0
+// experimental release. Test for this by consulting compiler major version,
+// the only reliable option available, so theoretically this could fail should
+// you attempt to mix an earlier version of libstdc++ with >= GCC5. But
+// that's unlikely to work out, especially as GCC5 changed ABI.
+#define CR_GLIBCXX_5_0_0 20150123
+#if (defined(__GNUC__) && __GNUC__ < 5) || \
+ (defined(__GLIBCXX__) && __GLIBCXX__ == CR_GLIBCXX_5_0_0)
+#define CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
+#endif
+
+// This hacks around using gcc with libc++, which has some incompatibilities.
+// - is_trivially_* doesn't work: https://llvm.org/bugs/show_bug.cgi?id=27538
+// TODO(danakj): Remove this when android builders are all using a newer version
+// of gcc, or the android ndk is updated to a newer libc++ that works with older
+// gcc versions.
+#if !defined(__clang__) && defined(_LIBCPP_VERSION)
+#define CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
+#endif
+
+namespace base {
+
+// Trait that is true only for non-const lvalue reference types: T& matches,
+// while const T& and non-reference types yield false.
+template <class T> struct is_non_const_reference : std::false_type {};
+template <class T> struct is_non_const_reference<T&> : std::true_type {};
+template <class T> struct is_non_const_reference<const T&> : std::false_type {};
+
+namespace internal {
+
+// Implementation detail of base::void_t below.
+template <typename...>
+struct make_void {
+ using type = void;
+};
+
+} // namespace internal
+
+// base::void_t is an implementation of std::void_t from C++17.
+//
+// We use |base::internal::make_void| as a helper struct to avoid a C++14
+// defect:
+// http://en.cppreference.com/w/cpp/types/void_t
+// http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558
+template <typename... Ts>
+using void_t = typename ::base::internal::make_void<Ts...>::type;
+
+namespace internal {
+
+// Uses expression SFINAE to detect whether using operator<< would work.
+//
+// The second template parameter defaults to void; the partial specialization
+// is selected only when |std::ostream& << T| is a well-formed expression.
+template <typename T, typename = void>
+struct SupportsOstreamOperator : std::false_type {};
+template <typename T>
+struct SupportsOstreamOperator<T,
+                               decltype(void(std::declval<std::ostream&>()
+                                             << std::declval<T>()))>
+    : std::true_type {};
+
+// Expression-SFINAE trait: true when T has an invocable ToString() member.
+template <typename T, typename = void>
+struct SupportsToString : std::false_type {};
+template <typename T>
+struct SupportsToString<T, decltype(void(std::declval<T>().ToString()))>
+    : std::true_type {};
+
+// Used to detect whether the given type is an iterator. This is normally used
+// with std::enable_if to provide disambiguation for functions that take
+// templatized iterators as input. Detection works because
+// std::iterator_traits<T>::iterator_category is only well-formed for
+// iterator types.
+template <typename T, typename = void>
+struct is_iterator : std::false_type {};
+
+template <typename T>
+struct is_iterator<T,
+                   void_t<typename std::iterator_traits<T>::iterator_category>>
+    : std::true_type {};
+
+} // namespace internal
+
+// is_trivially_copyable is especially hard to get right.
+// - Older versions of libstdc++ will fail to have it like they do for other
+// type traits. This has become a subset of the second point, but used to be
+// handled independently.
+// - An experimental release of gcc includes most of type_traits but misses
+// is_trivially_copyable, so we still have to avoid using libstdc++ in this
+// case, which is covered by CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX.
+// - When compiling libc++ from before r239653, with a gcc compiler, the
+// std::is_trivially_copyable can fail. So we need to work around that by not
+// using the one in libc++ in this case. This is covered by the
+// CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX define, and is discussed in
+// https://llvm.org/bugs/show_bug.cgi?id=27538#c1 where they point out that
+// in libc++'s commit r239653 this is fixed by libc++ checking for gcc 5.1.
+// - In both of the above cases we are using the gcc compiler. When defining
+// this ourselves on compiler intrinsics, the __is_trivially_copyable()
+// intrinsic is not available on gcc before version 5.1 (see the discussion in
+// https://llvm.org/bugs/show_bug.cgi?id=27538#c1 again), so we must check for
+// that version.
+// - When __is_trivially_copyable() is not available because we are on gcc older
+// than 5.1, we need to fall back to something, so we use __has_trivial_copy()
+// instead based on what was done one-off in bit_cast() previously.
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace and it works with gcc as needed.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX) || \
+    defined(CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX)
+template <typename T>
+struct is_trivially_copyable {
+// TODO(danakj): Remove this when android builders are all using a newer version
+// of gcc, or the android ndk is updated to a newer libc++ that does this for
+// us.
+// NOTE(review): _GNUC_VER is assumed to be supplied by the build as
+// major * 100 + minor; if it is undefined the preprocessor evaluates it as 0
+// and the __has_trivial_copy() branch below is taken -- confirm that is the
+// intended behavior for this toolchain.
+#if _GNUC_VER >= 501
+  static constexpr bool value = __is_trivially_copyable(T);
+#else
+  // Approximation for gcc < 5.1, which lacks __is_trivially_copyable().
+  static constexpr bool value =
+      __has_trivial_copy(T) && __has_trivial_destructor(T);
+#endif
+};
+#else
+// Modern toolchains: defer to the standard trait.
+template <class T>
+using is_trivially_copyable = std::is_trivially_copyable<T>;
+#endif
+
+#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ <= 7
+// Workaround for g++7 and earlier family.
+// Due to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80654, without this
+// Optional<std::vector<T>> where T is non-copyable causes a compile error.
+// As we know it is not trivially copy constructible, explicitly declare so.
+template <typename T>
+struct is_trivially_copy_constructible
+    : std::is_trivially_copy_constructible<T> {};
+
+// The variadic parameter pack lets this specialization match std::vector
+// with any allocator, not just the default one.
+template <typename... T>
+struct is_trivially_copy_constructible<std::vector<T...>> : std::false_type {};
+#else
+// Otherwise use std::is_trivially_copy_constructible as is.
+template <typename T>
+using is_trivially_copy_constructible = std::is_trivially_copy_constructible<T>;
+#endif
+
+// base::in_place_t is an implementation of std::in_place_t from
+// C++17. A tag type used to request in-place construction in template vararg
+// constructors.
+
+// Specification:
+// https://en.cppreference.com/w/cpp/utility/in_place
+struct in_place_t {};
+constexpr in_place_t in_place = {};
+
+// base::in_place_type_t is an implementation of std::in_place_type_t from
+// C++17. A tag type used for in-place construction when the type to construct
+// needs to be specified, such as with base::unique_any, designed to be a
+// drop-in replacement.
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/in_place
+template <typename T>
+struct in_place_type_t {};
+
+// Trait that detects whether a type is a specialization of in_place_type_t.
+// The primary template matches everything else and yields false.
+template <typename T>
+struct is_in_place_type_t {
+  static constexpr bool value = false;
+};
+
+template <typename... Ts>
+struct is_in_place_type_t<in_place_type_t<Ts...>> {
+  static constexpr bool value = true;
+};
+
+} // namespace base
+
+#undef CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
+#undef CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
+
+#endif // BASE_TEMPLATE_UTIL_H_
diff --git a/security/sandbox/chromium/base/third_party/cityhash/COPYING b/security/sandbox/chromium/base/third_party/cityhash/COPYING
new file mode 100644
index 0000000000..bf15194dd5
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/cityhash/COPYING
@@ -0,0 +1,19 @@
+// Copyright (c) 2011 Google, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
diff --git a/security/sandbox/chromium/base/third_party/cityhash/city.cc b/security/sandbox/chromium/base/third_party/cityhash/city.cc
new file mode 100644
index 0000000000..b0d2294aab
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/cityhash/city.cc
@@ -0,0 +1,532 @@
+// Copyright (c) 2011 Google, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+// CityHash, by Geoff Pike and Jyrki Alakuijala
+//
+// This file provides CityHash64() and related functions.
+//
+// It's probably possible to create even faster hash functions by
+// writing a program that systematically explores some of the space of
+// possible hash functions, by using SIMD instructions, or by
+// compromising on hash quality.
+
+#include "city.h"
+
+#include <string.h> // for memcpy and memset
+#include <algorithm>
+
+using std::make_pair;
+using std::pair;
+
+#ifdef _MSC_VER
+
+#include <stdlib.h>
+#define bswap_32(x) _byteswap_ulong(x)
+#define bswap_64(x) _byteswap_uint64(x)
+
+#elif defined(__APPLE__)
+
+// Mac OS X / Darwin features
+#include <libkern/OSByteOrder.h>
+#define bswap_32(x) OSSwapInt32(x)
+#define bswap_64(x) OSSwapInt64(x)
+
+#elif defined(__sun) || defined(sun)
+
+#include <sys/byteorder.h>
+#define bswap_32(x) BSWAP_32(x)
+#define bswap_64(x) BSWAP_64(x)
+
+#elif defined(__FreeBSD__)
+
+#include <sys/endian.h>
+#define bswap_32(x) bswap32(x)
+#define bswap_64(x) bswap64(x)
+
+#elif defined(__OpenBSD__)
+
+#include <sys/types.h>
+#define bswap_32(x) swap32(x)
+#define bswap_64(x) swap64(x)
+
+#elif defined(__NetBSD__)
+
+#include <machine/bswap.h>
+#include <sys/types.h>
+#if defined(__BSWAP_RENAME) && !defined(__bswap_32)
+#define bswap_32(x) bswap32(x)
+#define bswap_64(x) bswap64(x)
+#endif
+
+#else
+
+// XXX(cavalcanti): building 'native_client' fails with this header.
+//#include <byteswap.h>
+
+// Falling back to compiler builtins instead.
+#define bswap_32(x) __builtin_bswap32(x)
+#define bswap_64(x) __builtin_bswap64(x)
+
+#endif
+
+namespace base {
+namespace internal {
+namespace cityhash_v111 {
+
+#ifdef WORDS_BIGENDIAN
+#define uint32_in_expected_order(x) (bswap_32(x))
+#define uint64_in_expected_order(x) (bswap_64(x))
+#else
+#define uint32_in_expected_order(x) (x)
+#define uint64_in_expected_order(x) (x)
+#endif
+
+#if !defined(LIKELY)
+#if HAVE_BUILTIN_EXPECT
+#define LIKELY(x) (__builtin_expect(!!(x), 1))
+#else
+#define LIKELY(x) (x)
+#endif
+#endif
+
+// Reads 8 bytes via memcpy: the portable way to perform a potentially
+// unaligned load without undefined behavior.
+static uint64 UNALIGNED_LOAD64(const char* p) {
+  uint64 result;
+  memcpy(&result, p, sizeof(result));
+  return result;
+}
+
+// Reads 4 bytes via memcpy: the portable way to perform a potentially
+// unaligned load without undefined behavior.
+static uint32 UNALIGNED_LOAD32(const char* p) {
+  uint32 result;
+  memcpy(&result, p, sizeof(result));
+  return result;
+}
+
+// Loads 8 bytes as little-endian, byte-swapping on big-endian targets.
+static uint64 Fetch64(const char* p) {
+  return uint64_in_expected_order(UNALIGNED_LOAD64(p));
+}
+
+// Loads 4 bytes as little-endian, byte-swapping on big-endian targets.
+static uint32 Fetch32(const char* p) {
+  return uint32_in_expected_order(UNALIGNED_LOAD32(p));
+}
+
+// Some primes between 2^63 and 2^64 for various uses.
+static const uint64 k0 = 0xc3a5c85c97cb3127ULL;
+static const uint64 k1 = 0xb492b66fbe98f273ULL;
+static const uint64 k2 = 0x9ae16a3b2f90404fULL;
+
+// Magic numbers for 32-bit hashing. Copied from Murmur3.
+static const uint32 c1 = 0xcc9e2d51;
+static const uint32 c2 = 0x1b873593;
+
+// A 32-bit to 32-bit integer hash copied from Murmur3.
+static uint32 fmix(uint32 h) {
+  // Finalization mix: alternating xor-shifts and multiplies spread the
+  // input's entropy across all 32 output bits.
+  h ^= h >> 16;
+  h *= 0x85ebca6b;
+  h ^= h >> 13;
+  h *= 0xc2b2ae35;
+  h ^= h >> 16;
+  return h;
+}
+
+// Bitwise right-rotate of a 32-bit value by |shift| bits.
+static uint32 Rotate32(uint32 val, int shift) {
+  // Avoid shifting by 32: doing so yields an undefined result.
+  return shift == 0 ? val : ((val >> shift) | (val << (32 - shift)));
+}
+
+#undef PERMUTE3
+#define PERMUTE3(a, b, c) \
+ do { \
+ std::swap(a, b); \
+ std::swap(a, c); \
+ } while (0)
+
+// Helper from Murmur3 for combining two 32-bit values: |a| is scrambled and
+// folded into the running hash |h|.
+static uint32 Mur(uint32 a, uint32 h) {
+  a *= c1;
+  a = Rotate32(a, 17);
+  a *= c2;
+  h ^= a;
+  h = Rotate32(h, 19);
+  return h * 5 + 0xe6546b64;
+}
+
+// Hashes strings of length 13..24 with six overlapping 4-byte reads that
+// together cover the whole input.
+static uint32 Hash32Len13to24(const char* s, size_t len) {
+  uint32 a = Fetch32(s - 4 + (len >> 1));
+  uint32 b = Fetch32(s + 4);
+  uint32 c = Fetch32(s + len - 8);
+  uint32 d = Fetch32(s + (len >> 1));
+  uint32 e = Fetch32(s);
+  uint32 f = Fetch32(s + len - 4);
+  uint32 h = len;  // Narrowing size_t -> uint32 is harmless here: len <= 24.
+
+  return fmix(Mur(f, Mur(e, Mur(d, Mur(c, Mur(b, Mur(a, h)))))));
+}
+
+// Hashes strings of length 0..4 by folding in one byte at a time.
+static uint32 Hash32Len0to4(const char* s, size_t len) {
+  uint32 b = 0;
+  uint32 c = 9;
+  for (size_t i = 0; i < len; i++) {
+    // Explicitly signed so the result does not depend on whether plain char
+    // is signed on this platform.
+    signed char v = s[i];
+    b = b * c1 + v;
+    c ^= b;
+  }
+  return fmix(Mur(b, Mur(len, c)));
+}
+
+// Hashes strings of length 5..12 with three (possibly overlapping) 4-byte
+// reads.
+static uint32 Hash32Len5to12(const char* s, size_t len) {
+  uint32 a = len, b = len * 5, c = 9, d = b;
+  a += Fetch32(s);
+  b += Fetch32(s + len - 4);
+  c += Fetch32(s + ((len >> 1) & 4));
+  return fmix(Mur(c, Mur(b, Mur(a, d))));
+}
+
+// 32-bit CityHash entry point: dispatches short inputs to the specialized
+// helpers above, and processes longer inputs by mixing the tail first and
+// then iterating over 20-byte chunks from the front.
+uint32 CityHash32(const char* s, size_t len) {
+  if (len <= 24) {
+    return len <= 12
+               ? (len <= 4 ? Hash32Len0to4(s, len) : Hash32Len5to12(s, len))
+               : Hash32Len13to24(s, len);
+  }
+
+  // len > 24: seed three lanes (h, g, f) from the final 20 bytes.
+  uint32 h = len, g = c1 * len, f = g;
+  uint32 a0 = Rotate32(Fetch32(s + len - 4) * c1, 17) * c2;
+  uint32 a1 = Rotate32(Fetch32(s + len - 8) * c1, 17) * c2;
+  uint32 a2 = Rotate32(Fetch32(s + len - 16) * c1, 17) * c2;
+  uint32 a3 = Rotate32(Fetch32(s + len - 12) * c1, 17) * c2;
+  uint32 a4 = Rotate32(Fetch32(s + len - 20) * c1, 17) * c2;
+  h ^= a0;
+  h = Rotate32(h, 19);
+  h = h * 5 + 0xe6546b64;
+  h ^= a2;
+  h = Rotate32(h, 19);
+  h = h * 5 + 0xe6546b64;
+  g ^= a1;
+  g = Rotate32(g, 19);
+  g = g * 5 + 0xe6546b64;
+  g ^= a3;
+  g = Rotate32(g, 19);
+  g = g * 5 + 0xe6546b64;
+  f += a4;
+  f = Rotate32(f, 19);
+  f = f * 5 + 0xe6546b64;
+  // Then consume the input in 20-byte chunks from the front. iters is chosen
+  // so the chunks never read past the end of the input (20 * iters <= len - 1).
+  size_t iters = (len - 1) / 20;
+  do {
+    a0 = Rotate32(Fetch32(s) * c1, 17) * c2;
+    a1 = Fetch32(s + 4);
+    a2 = Rotate32(Fetch32(s + 8) * c1, 17) * c2;
+    a3 = Rotate32(Fetch32(s + 12) * c1, 17) * c2;
+    a4 = Fetch32(s + 16);
+    h ^= a0;
+    h = Rotate32(h, 18);
+    h = h * 5 + 0xe6546b64;
+    f += a1;
+    f = Rotate32(f, 19);
+    f = f * c1;
+    g += a2;
+    g = Rotate32(g, 18);
+    g = g * 5 + 0xe6546b64;
+    h ^= a3 + a1;
+    h = Rotate32(h, 19);
+    h = h * 5 + 0xe6546b64;
+    g ^= a4;
+    g = bswap_32(g) * 5;
+    h += a4 * 5;
+    h = bswap_32(h);
+    f += a0;
+    PERMUTE3(f, h, g);
+    s += 20;
+  } while (--iters != 0);
+  // Final avalanche: fold g and f into h.
+  g = Rotate32(g, 11) * c1;
+  g = Rotate32(g, 17) * c1;
+  f = Rotate32(f, 11) * c1;
+  f = Rotate32(f, 17) * c1;
+  h = Rotate32(h + g, 19);
+  h = h * 5 + 0xe6546b64;
+  h = Rotate32(h, 17) * c1;
+  h = Rotate32(h + f, 19);
+  h = h * 5 + 0xe6546b64;
+  h = Rotate32(h, 17) * c1;
+  return h;
+}
+
+// Bitwise right rotate. Normally this will compile to a single
+// instruction, especially if the shift is a manifest constant.
+static uint64 Rotate(uint64 val, int shift) {
+ // Avoid shifting by 64: doing so yields an undefined result.
+ return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
+}
+
+// Mixes the high bits of |val| down into the low bits.
+static uint64 ShiftMix(uint64 val) {
+  return val ^ (val >> 47);
+}
+
+// Hashes two 64-bit values down to one (via Hash128to64 on their uint128).
+static uint64 HashLen16(uint64 u, uint64 v) {
+  return Hash128to64(uint128(u, v));
+}
+
+static uint64 HashLen16(uint64 u, uint64 v, uint64 mul) {
+  // Murmur-inspired hashing. Callers pick |mul| (typically k2 + 2 * len) so
+  // the length is folded into the multiplier.
+  uint64 a = (u ^ v) * mul;
+  a ^= (a >> 47);
+  uint64 b = (v ^ a) * mul;
+  b ^= (b >> 47);
+  b *= mul;
+  return b;
+}
+
+// Hashes inputs of 0..16 bytes, branching on length.
+static uint64 HashLen0to16(const char* s, size_t len) {
+  if (len >= 8) {
+    // Two (possibly overlapping) 8-byte reads cover the input.
+    uint64 mul = k2 + len * 2;
+    uint64 a = Fetch64(s) + k2;
+    uint64 b = Fetch64(s + len - 8);
+    uint64 c = Rotate(b, 37) * mul + a;
+    uint64 d = (Rotate(a, 25) + b) * mul;
+    return HashLen16(c, d, mul);
+  }
+  if (len >= 4) {
+    // Two (possibly overlapping) 4-byte reads.
+    uint64 mul = k2 + len * 2;
+    uint64 a = Fetch32(s);
+    return HashLen16(len + (a << 3), Fetch32(s + len - 4), mul);
+  }
+  if (len > 0) {
+    // 1..3 bytes: first, middle and last byte (these may coincide).
+    uint8 a = s[0];
+    uint8 b = s[len >> 1];
+    uint8 c = s[len - 1];
+    uint32 y = static_cast<uint32>(a) + (static_cast<uint32>(b) << 8);
+    uint32 z = len + (static_cast<uint32>(c) << 2);
+    return ShiftMix(y * k2 ^ z * k0) * k2;
+  }
+  // Empty input hashes to a fixed prime.
+  return k2;
+}
+
+// This probably works well for 16-byte strings as well, but it may be overkill
+// in that case. Four overlapping 8-byte reads cover the 17..32-byte input.
+static uint64 HashLen17to32(const char* s, size_t len) {
+  uint64 mul = k2 + len * 2;
+  uint64 a = Fetch64(s) * k1;
+  uint64 b = Fetch64(s + 8);
+  uint64 c = Fetch64(s + len - 8) * mul;
+  uint64 d = Fetch64(s + len - 16) * k2;
+  return HashLen16(Rotate(a + b, 43) + Rotate(c, 30) + d,
+                   a + Rotate(b + k2, 18) + c, mul);
+}
+
+// Return a 16-byte hash for 48 bytes. Quick and dirty.
+// Callers do best to use "random-looking" values for a and b.
+// Returning a pair lets CityHash64 carry 128 bits of rolling state per call.
+static pair<uint64, uint64> WeakHashLen32WithSeeds(uint64 w,
+                                                   uint64 x,
+                                                   uint64 y,
+                                                   uint64 z,
+                                                   uint64 a,
+                                                   uint64 b) {
+  a += w;
+  b = Rotate(b + a + z, 21);
+  uint64 c = a;
+  a += x;
+  a += y;
+  b += Rotate(a, 44);
+  return make_pair(a + z, b + c);
+}
+
+// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
+// Convenience overload: loads four consecutive 8-byte words and forwards to
+// the six-argument form above.
+static pair<uint64, uint64> WeakHashLen32WithSeeds(const char* s,
+                                                   uint64 a,
+                                                   uint64 b) {
+  return WeakHashLen32WithSeeds(Fetch64(s), Fetch64(s + 8), Fetch64(s + 16),
+                                Fetch64(s + 24), a, b);
+}
+
+// Return an 8-byte hash for 33 to 64 bytes. Eight 8-byte reads (overlapping
+// when len < 64) cover the whole input.
+static uint64 HashLen33to64(const char* s, size_t len) {
+  uint64 mul = k2 + len * 2;
+  uint64 a = Fetch64(s) * k2;
+  uint64 b = Fetch64(s + 8);
+  uint64 c = Fetch64(s + len - 24);
+  uint64 d = Fetch64(s + len - 32);
+  uint64 e = Fetch64(s + 16) * k2;
+  uint64 f = Fetch64(s + 24) * 9;
+  uint64 g = Fetch64(s + len - 8);
+  uint64 h = Fetch64(s + len - 16) * mul;
+  uint64 u = Rotate(a + g, 43) + (Rotate(b, 30) + c) * 9;
+  uint64 v = ((a + g) ^ d) + f + 1;
+  uint64 w = bswap_64((u + v) * mul) + h;
+  uint64 x = Rotate(e + f, 42) + c;
+  uint64 y = (bswap_64((v + w) * mul) + g) * mul;
+  uint64 z = e + f + c;
+  a = bswap_64((x + z) * mul + y) + b;
+  b = ShiftMix((z + a) * mul + d + h) * mul;
+  return b + x;
+}
+
+// 64-bit CityHash entry point: short inputs dispatch to the fixed-size
+// helpers; longer inputs use a 64-byte-chunk loop over rolling state.
+uint64 CityHash64(const char* s, size_t len) {
+  if (len <= 32) {
+    if (len <= 16) {
+      return HashLen0to16(s, len);
+    } else {
+      return HashLen17to32(s, len);
+    }
+  } else if (len <= 64) {
+    return HashLen33to64(s, len);
+  }
+
+  // For strings over 64 bytes we hash the end first, and then as we
+  // loop we keep 56 bytes of state: v, w, x, y, and z.
+  uint64 x = Fetch64(s + len - 40);
+  uint64 y = Fetch64(s + len - 16) + Fetch64(s + len - 56);
+  uint64 z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24));
+  pair<uint64, uint64> v = WeakHashLen32WithSeeds(s + len - 64, len, z);
+  pair<uint64, uint64> w = WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
+  x = x * k1 + Fetch64(s);
+
+  // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
+  // (The final partial chunk was already absorbed above via the tail reads.)
+  len = (len - 1) & ~static_cast<size_t>(63);
+  do {
+    x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
+    y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
+    x ^= w.second;
+    y += v.first + Fetch64(s + 40);
+    z = Rotate(z + w.first, 33) * k1;
+    v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
+    w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
+    std::swap(z, x);
+    s += 64;
+    len -= 64;
+  } while (len != 0);
+  // Combine the 56 bytes of state into the final 64-bit result.
+  return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z,
+                   HashLen16(v.second, w.second) + x);
+}
+
+// One-seed variant: delegates to the two-seed variant, supplying the
+// constant k2 as the implicit first seed.
+uint64 CityHash64WithSeed(const char* s, size_t len, uint64 seed) {
+  return CityHash64WithSeeds(s, len, k2, seed);
+}
+
+// Two-seed variant: post-mixes the unseeded 64-bit hash with the seeds
+// via HashLen16 (subtract seed0, then combine with seed1).
+uint64 CityHash64WithSeeds(const char* s,
+                           size_t len,
+                           uint64 seed0,
+                           uint64 seed1) {
+  return HashLen16(CityHash64(s, len) - seed0, seed1);
+}
+
+// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings
+// of any length representable in signed long. Based on City and Murmur.
+// Short inputs (<= 16 bytes) use HashLen0to16; longer inputs consume the
+// string 16 bytes per iteration, alternating words into the a/b and c/d
+// accumulators, then cross-combine all four via HashLen16.
+static uint128 CityMurmur(const char* s, size_t len, uint128 seed) {
+  uint64 a = Uint128Low64(seed);
+  uint64 b = Uint128High64(seed);
+  uint64 c = 0;
+  uint64 d = 0;
+  // Remaining-byte counter; may go negative. Per the contract above, len
+  // must be representable in signed long.
+  signed long l = len - 16;
+  if (l <= 0) { // len <= 16
+    a = ShiftMix(a * k1) * k1;
+    c = b * k1 + HashLen0to16(s, len);
+    d = ShiftMix(a + (len >= 8 ? Fetch64(s) : c));
+  } else { // len > 16
+    c = HashLen16(Fetch64(s + len - 8) + k1, a);
+    d = HashLen16(b + len, c + Fetch64(s + len - 16));
+    a += d;
+    do {
+      a ^= ShiftMix(Fetch64(s) * k1) * k1;
+      a *= k1;
+      b ^= a;
+      c ^= ShiftMix(Fetch64(s + 8) * k1) * k1;
+      c *= k1;
+      d ^= c;
+      s += 16;
+      l -= 16;
+    } while (l > 0);
+  }
+  a = HashLen16(a, c);
+  b = HashLen16(d, b);
+  return uint128(a ^ b, HashLen16(b, a));
+}
+
+// Hash function for a byte array with a 128-bit seed (declared in city.h).
+// Inputs under 128 bytes are delegated to CityMurmur; longer inputs run
+// the CityHash64 inner loop twice per 128-byte chunk and then fold in the
+// remaining tail in 32-byte steps taken from the end of the string.
+uint128 CityHash128WithSeed(const char* s, size_t len, uint128 seed) {
+  if (len < 128) {
+    return CityMurmur(s, len, seed);
+  }
+
+  // We expect len >= 128 to be the common case. Keep 56 bytes of state:
+  // v, w, x, y, and z.
+  pair<uint64, uint64> v, w;
+  uint64 x = Uint128Low64(seed);
+  uint64 y = Uint128High64(seed);
+  uint64 z = len * k1;
+  v.first = Rotate(y ^ k1, 49) * k1 + Fetch64(s);
+  v.second = Rotate(v.first, 42) * k1 + Fetch64(s + 8);
+  w.first = Rotate(y + z, 35) * k1 + x;
+  // Reading s + 88 is in bounds because len >= 128 here.
+  w.second = Rotate(x + Fetch64(s + 88), 53) * k1;
+
+  // This is the same inner loop as CityHash64(), manually unrolled.
+  do {
+    x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
+    y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
+    x ^= w.second;
+    y += v.first + Fetch64(s + 40);
+    z = Rotate(z + w.first, 33) * k1;
+    v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
+    w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
+    std::swap(z, x);
+    s += 64;
+    x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
+    y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
+    x ^= w.second;
+    y += v.first + Fetch64(s + 40);
+    z = Rotate(z + w.first, 33) * k1;
+    v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
+    w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
+    std::swap(z, x);
+    s += 64;
+    len -= 128;
+  } while (LIKELY(len >= 128));
+  x += Rotate(v.first + z, 49) * k0;
+  y = y * k0 + Rotate(w.second, 37);
+  z = z * k0 + Rotate(w.first, 27);
+  w.first *= 9;
+  v.first *= k0;
+  // If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s.
+  for (size_t tail_done = 0; tail_done < len;) {
+    tail_done += 32;
+    y = Rotate(x + y, 42) * k0 + v.second;
+    w.first += Fetch64(s + len - tail_done + 16);
+    x = x * k0 + w.first;
+    z += w.second + Fetch64(s + len - tail_done);
+    w.second += v.first;
+    v = WeakHashLen32WithSeeds(s + len - tail_done, v.first + z, v.second);
+    v.first *= k0;
+  }
+  // At this point our 56 bytes of state should contain more than
+  // enough information for a strong 128-bit hash. We use two
+  // different 56-byte-to-8-byte hashes to get a 16-byte final result.
+  x = HashLen16(x, v.first);
+  y = HashLen16(y + z, w.first);
+  return uint128(HashLen16(x + v.second, w.second) + y,
+                 HashLen16(x + w.second, y + v.second));
+}
+
+// Hash function for a byte array. When at least 16 bytes are available,
+// the first 16 bytes become the seed and the remainder is hashed;
+// otherwise the fixed seed (k0, k1) is used over the whole input.
+uint128 CityHash128(const char* s, size_t len) {
+  return len >= 16
+             ? CityHash128WithSeed(s + 16, len - 16,
+                                   uint128(Fetch64(s), Fetch64(s + 8) + k0))
+             : CityHash128WithSeed(s, len, uint128(k0, k1));
+}
+
+} // namespace cityhash_v111
+} // namespace internal
+} // namespace base
diff --git a/security/sandbox/chromium/base/third_party/cityhash/city.h b/security/sandbox/chromium/base/third_party/cityhash/city.h
new file mode 100644
index 0000000000..3e3dcaaaa9
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/cityhash/city.h
@@ -0,0 +1,129 @@
+// Copyright (c) 2011 Google, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+// CityHash, by Geoff Pike and Jyrki Alakuijala
+//
+// http://code.google.com/p/cityhash/
+//
+// This file provides a few functions for hashing strings. All of them are
+// high-quality functions in the sense that they pass standard tests such
+// as Austin Appleby's SMHasher. They are also fast.
+//
+// For 64-bit x86 code, on short strings, we don't know of anything faster than
+// CityHash64 that is of comparable quality. We believe our nearest competitor
+// is Murmur3. For 64-bit x86 code, CityHash64 is an excellent choice for hash
+// tables and most other hashing (excluding cryptography).
+//
+// For 64-bit x86 code, on long strings, the picture is more complicated.
+// On many recent Intel CPUs, such as Nehalem, Westmere, Sandy Bridge, etc.,
+// CityHashCrc128 appears to be faster than all competitors of comparable
+// quality. CityHash128 is also good but not quite as fast. We believe our
+// nearest competitor is Bob Jenkins' Spooky. We don't have great data for
+// other 64-bit CPUs, but for long strings we know that Spooky is slightly
+// faster than CityHash on some relatively recent AMD x86-64 CPUs, for example.
+// Note that CityHashCrc128 is declared in citycrc.h.
+//
+// For 32-bit x86 code, we don't know of anything faster than CityHash32 that
+// is of comparable quality. We believe our nearest competitor is Murmur3A.
+// (On 64-bit CPUs, it is typically faster to use the other CityHash variants.)
+//
+// Functions in the CityHash family are not suitable for cryptography.
+//
+// Please see CityHash's README file for more details on our performance
+// measurements and so on.
+//
+// WARNING: This code has been only lightly tested on big-endian platforms!
+// It is known to work well on little-endian platforms that have a small penalty
+// for unaligned reads, such as current Intel and AMD moderate-to-high-end CPUs.
+// It should work on all 32-bit and 64-bit platforms that allow unaligned reads;
+// bug reports are welcome.
+//
+// By the way, for some hash functions, given strings a and b, the hash
+// of a+b is easily derived from the hashes of a and b. This property
+// doesn't hold for any hash functions in this file.
+
+#ifndef BASE_THIRD_PARTY_CITYHASH_CITY_H_
+#define BASE_THIRD_PARTY_CITYHASH_CITY_H_
+
+#include <stdint.h>
+#include <stdlib.h> // for size_t.
+#include <utility>
+
+// XXX(cavalcantii): Declaring it inside of the 'base' namespace allows to
+// handle linker symbol clash error with deprecated CityHash from
+// third_party/smhasher in a few unit tests.
+namespace base {
+namespace internal {
+namespace cityhash_v111 {
+
+// Fixed-width aliases used by the CityHash implementation; the 128-bit
+// type is represented as a (low, high) pair of 64-bit words.
+typedef uint8_t uint8;
+typedef uint32_t uint32;
+typedef uint64_t uint64;
+typedef std::pair<uint64, uint64> uint128;
+
+// Accessors for the two halves of a uint128: .first holds the low 64 bits
+// and .second the high 64 bits.
+inline uint64 Uint128Low64(const uint128& x) {
+  return x.first;
+}
+inline uint64 Uint128High64(const uint128& x) {
+  return x.second;
+}
+
+// Hash function for a byte array.
+uint64 CityHash64(const char* buf, size_t len);
+
+// Hash function for a byte array. For convenience, a 64-bit seed is also
+// hashed into the result.
+uint64 CityHash64WithSeed(const char* buf, size_t len, uint64 seed);
+
+// Hash function for a byte array. For convenience, two seeds are also
+// hashed into the result.
+uint64 CityHash64WithSeeds(const char* buf,
+ size_t len,
+ uint64 seed0,
+ uint64 seed1);
+
+// Hash function for a byte array.
+uint128 CityHash128(const char* s, size_t len);
+
+// Hash function for a byte array. For convenience, a 128-bit seed is also
+// hashed into the result.
+uint128 CityHash128WithSeed(const char* s, size_t len, uint128 seed);
+
+// Hash function for a byte array. Most useful in 32-bit binaries.
+uint32 CityHash32(const char* buf, size_t len);
+
+// Hash 128 input bits down to 64 bits of output.
+// This is intended to be a reasonably good hash function.
+inline uint64 Hash128to64(const uint128& x) {
+  // Murmur-inspired hashing: multiply by an odd 64-bit constant, then
+  // xor-shift by 47 to diffuse high bits into low bits, twice over.
+  const uint64 kMul = 0x9ddfea08eb382d69ULL;
+  uint64 a = (Uint128Low64(x) ^ Uint128High64(x)) * kMul;
+  a ^= (a >> 47);
+  uint64 b = (Uint128High64(x) ^ a) * kMul;
+  b ^= (b >> 47);
+  b *= kMul;
+  return b;
+}
+
+} // namespace cityhash_v111
+} // namespace internal
+} // namespace base
+
+#endif  // BASE_THIRD_PARTY_CITYHASH_CITY_H_
diff --git a/security/sandbox/chromium/base/third_party/dynamic_annotations/LICENSE b/security/sandbox/chromium/base/third_party/dynamic_annotations/LICENSE
new file mode 100644
index 0000000000..5c581a9391
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/dynamic_annotations/LICENSE
@@ -0,0 +1,28 @@
+/* Copyright (c) 2008-2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ---
+ * Author: Kostya Serebryany
+ */
diff --git a/security/sandbox/chromium/base/third_party/dynamic_annotations/dynamic_annotations.h b/security/sandbox/chromium/base/third_party/dynamic_annotations/dynamic_annotations.h
new file mode 100644
index 0000000000..8d7f05202b
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/dynamic_annotations/dynamic_annotations.h
@@ -0,0 +1,595 @@
+/* Copyright (c) 2011, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This file defines dynamic annotations for use with dynamic analysis
+ tool such as valgrind, PIN, etc.
+
+ Dynamic annotation is a source code annotation that affects
+ the generated code (that is, the annotation is not a comment).
+ Each such annotation is attached to a particular
+ instruction and/or to a particular object (address) in the program.
+
+ The annotations that should be used by users are macros in all upper-case
+ (e.g., ANNOTATE_NEW_MEMORY).
+
+ Actual implementation of these macros may differ depending on the
+ dynamic analysis tool being used.
+
+ See http://code.google.com/p/data-race-test/ for more information.
+
+ This file supports the following dynamic analysis tools:
+ - None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero).
+ Macros are defined empty.
+ - ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1).
+ Macros are defined as calls to non-inlinable empty functions
+ that are intercepted by Valgrind. */
+
+/* NOTE(review): identifiers beginning with a double underscore are reserved
+   to the implementation; kept as-is to match the upstream header, since the
+   matching file-level #endif is defined elsewhere in this file. */
+#ifndef __DYNAMIC_ANNOTATIONS_H__
+#define __DYNAMIC_ANNOTATIONS_H__
+
+#ifndef DYNAMIC_ANNOTATIONS_PREFIX
+# define DYNAMIC_ANNOTATIONS_PREFIX
+#endif
+
+#ifndef DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND
+# define DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND 1
+#endif
+
+#ifdef DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK
+# ifdef __GNUC__
+# define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
+# else
+/* TODO(glider): for Windows support we may want to change this macro in order
+ to prepend __declspec(selectany) to the annotations' declarations. */
+# error weak annotations are not supported for your compiler
+# endif
+#else
+# define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
+#endif
+
+/* The following preprocessor magic prepends the value of
+ DYNAMIC_ANNOTATIONS_PREFIX to annotation function names. */
+#define DYNAMIC_ANNOTATIONS_GLUE0(A, B) A##B
+#define DYNAMIC_ANNOTATIONS_GLUE(A, B) DYNAMIC_ANNOTATIONS_GLUE0(A, B)
+#define DYNAMIC_ANNOTATIONS_NAME(name) \
+ DYNAMIC_ANNOTATIONS_GLUE(DYNAMIC_ANNOTATIONS_PREFIX, name)
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+# define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+ /* -------------------------------------------------------------
+ Annotations useful when implementing condition variables such as CondVar,
+ using conditional critical sections (Await/LockWhen) and when constructing
+ user-defined synchronization mechanisms.
+
+ The annotations ANNOTATE_HAPPENS_BEFORE() and ANNOTATE_HAPPENS_AFTER() can
+ be used to define happens-before arcs in user-defined synchronization
+ mechanisms: the race detector will infer an arc from the former to the
+ latter when they share the same argument pointer.
+
+ Example 1 (reference counting):
+
+ void Unref() {
+ ANNOTATE_HAPPENS_BEFORE(&refcount_);
+ if (AtomicDecrementByOne(&refcount_) == 0) {
+ ANNOTATE_HAPPENS_AFTER(&refcount_);
+ delete this;
+ }
+ }
+
+ Example 2 (message queue):
+
+ void MyQueue::Put(Type *e) {
+ MutexLock lock(&mu_);
+ ANNOTATE_HAPPENS_BEFORE(e);
+ PutElementIntoMyQueue(e);
+ }
+
+ Type *MyQueue::Get() {
+ MutexLock lock(&mu_);
+ Type *e = GetElementFromMyQueue();
+ ANNOTATE_HAPPENS_AFTER(e);
+ return e;
+ }
+
+ Note: when possible, please use the existing reference counting and message
+ queue implementations instead of inventing new ones. */
+
+ /* Report that wait on the condition variable at address "cv" has succeeded
+ and the lock at address "lock" is held. */
+ #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, lock)
+
+ /* Report that wait on the condition variable at "cv" has succeeded. Variant
+ w/o lock. */
+ #define ANNOTATE_CONDVAR_WAIT(cv) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, NULL)
+
+ /* Report that we are about to signal on the condition variable at address
+ "cv". */
+ #define ANNOTATE_CONDVAR_SIGNAL(cv) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(__FILE__, __LINE__, cv)
+
+ /* Report that we are about to signal_all on the condition variable at address
+ "cv". */
+ #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(__FILE__, __LINE__, cv)
+
+ /* Annotations for user-defined synchronization mechanisms. */
+ #define ANNOTATE_HAPPENS_BEFORE(obj) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(__FILE__, __LINE__, obj)
+ #define ANNOTATE_HAPPENS_AFTER(obj) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(__FILE__, __LINE__, obj)
+
+ /* DEPRECATED. Don't use it. */
+ #define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(__FILE__, __LINE__, \
+ pointer, size)
+
+ /* DEPRECATED. Don't use it. */
+ #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(__FILE__, __LINE__, \
+ pointer, size)
+
+ /* DEPRECATED. Don't use it. */
+ #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) \
+ do { \
+ ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size); \
+ ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size); \
+ } while (0)
+
+ /* Instruct the tool to create a happens-before arc between mu->Unlock() and
+ mu->Lock(). This annotation may slow down the race detector and hide real
+ races. Normally it is used only when it would be difficult to annotate each
+ of the mutex's critical sections individually using the annotations above.
+ This annotation makes sense only for hybrid race detectors. For pure
+ happens-before detectors this is a no-op. For more details see
+ http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */
+ #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \
+ mu)
+
+ /* Opposite to ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX.
+ Instruct the tool to NOT create h-b arcs between Unlock and Lock, even in
+ pure happens-before mode. For a hybrid mode this is a no-op. */
+ #define ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX(mu) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(__FILE__, __LINE__, mu)
+
+ /* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
+ #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \
+ mu)
+
+ /* -------------------------------------------------------------
+ Annotations useful when defining memory allocators, or when memory that
+ was protected in one way starts to be protected in another. */
+
+ /* Report that a new memory at "address" of size "size" has been allocated.
+ This might be used when the memory has been retrieved from a free list and
+ is about to be reused, or when a the locking discipline for a variable
+ changes. */
+ #define ANNOTATE_NEW_MEMORY(address, size) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(__FILE__, __LINE__, address, \
+ size)
+
+ /* -------------------------------------------------------------
+ Annotations useful when defining FIFO queues that transfer data between
+ threads. */
+
+ /* Report that the producer-consumer queue (such as ProducerConsumerQueue) at
+ address "pcq" has been created. The ANNOTATE_PCQ_* annotations
+ should be used only for FIFO queues. For non-FIFO queues use
+ ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
+ #define ANNOTATE_PCQ_CREATE(pcq) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(__FILE__, __LINE__, pcq)
+
+ /* Report that the queue at address "pcq" is about to be destroyed. */
+ #define ANNOTATE_PCQ_DESTROY(pcq) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(__FILE__, __LINE__, pcq)
+
+ /* Report that we are about to put an element into a FIFO queue at address
+ "pcq". */
+ #define ANNOTATE_PCQ_PUT(pcq) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(__FILE__, __LINE__, pcq)
+
+ /* Report that we've just got an element from a FIFO queue at address
+ "pcq". */
+ #define ANNOTATE_PCQ_GET(pcq) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(__FILE__, __LINE__, pcq)
+
+ /* -------------------------------------------------------------
+ Annotations that suppress errors. It is usually better to express the
+ program's synchronization using the other annotations, but these can
+ be used when all else fails. */
+
+ /* Report that we may have a benign race at "pointer", with size
+ "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the
+ point where "pointer" has been allocated, preferably close to the point
+ where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */
+ #define ANNOTATE_BENIGN_RACE(pointer, description) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \
+ pointer, sizeof(*(pointer)), description)
+
+ /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
+ the memory range [address, address+size). */
+ #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \
+ address, size, description)
+
+ /* Request the analysis tool to ignore all reads in the current thread
+ until ANNOTATE_IGNORE_READS_END is called.
+ Useful to ignore intentional racey reads, while still checking
+ other reads and all writes.
+ See also ANNOTATE_UNPROTECTED_READ. */
+ #define ANNOTATE_IGNORE_READS_BEGIN() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+
+ /* Stop ignoring reads. */
+ #define ANNOTATE_IGNORE_READS_END() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+
+ /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
+ #define ANNOTATE_IGNORE_WRITES_BEGIN() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+ /* Stop ignoring writes. */
+ #define ANNOTATE_IGNORE_WRITES_END() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+ /* Start ignoring all memory accesses (reads and writes). */
+ #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+ do {\
+ ANNOTATE_IGNORE_READS_BEGIN();\
+ ANNOTATE_IGNORE_WRITES_BEGIN();\
+ }while(0)\
+
+ /* Stop ignoring all memory accesses. */
+ #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+ do {\
+ ANNOTATE_IGNORE_WRITES_END();\
+ ANNOTATE_IGNORE_READS_END();\
+ }while(0)\
+
+ /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore synchronization events:
+ RWLOCK* and CONDVAR*. */
+ #define ANNOTATE_IGNORE_SYNC_BEGIN() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(__FILE__, __LINE__)
+
+ /* Stop ignoring sync events. */
+ #define ANNOTATE_IGNORE_SYNC_END() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(__FILE__, __LINE__)
+
+
+ /* Enable (enable!=0) or disable (enable==0) race detection for all threads.
+ This annotation could be useful if you want to skip expensive race analysis
+ during some period of program execution, e.g. during initialization. */
+ #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(__FILE__, __LINE__, \
+ enable)
+
+ /* -------------------------------------------------------------
+ Annotations useful for debugging. */
+
+ /* Request to trace every access to "address". */
+ #define ANNOTATE_TRACE_MEMORY(address) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(__FILE__, __LINE__, address)
+
+ /* Report the current thread name to a race detector. */
+ #define ANNOTATE_THREAD_NAME(name) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+ /* -------------------------------------------------------------
+ Annotations useful when implementing locks. They are not
+ normally needed by modules that merely use locks.
+ The "lock" argument is a pointer to the lock object. */
+
+ /* Report that a lock has been created at address "lock". */
+ #define ANNOTATE_RWLOCK_CREATE(lock) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+ /* Report that the lock at address "lock" is about to be destroyed. */
+ #define ANNOTATE_RWLOCK_DESTROY(lock) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+ /* Report that the lock at address "lock" has been acquired.
+ is_w=1 for writer lock, is_w=0 for reader lock. */
+ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(__FILE__, __LINE__, lock, \
+ is_w)
+
+ /* Report that the lock at address "lock" is about to be released. */
+ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(__FILE__, __LINE__, lock, \
+ is_w)
+
+ /* -------------------------------------------------------------
+ Annotations useful when implementing barriers. They are not
+ normally needed by modules that merely use barriers.
+ The "barrier" argument is a pointer to the barrier object. */
+
+ /* Report that the "barrier" has been initialized with initial "count".
+ If 'reinitialization_allowed' is true, initialization is allowed to happen
+ multiple times w/o calling barrier_destroy() */
+ #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(__FILE__, __LINE__, barrier, \
+ count, reinitialization_allowed)
+
+ /* Report that we are about to enter barrier_wait("barrier"). */
+ #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(__FILE__, __LINE__, \
+ barrier)
+
+ /* Report that we just exited barrier_wait("barrier"). */
+ #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(__FILE__, __LINE__, \
+ barrier)
+
+ /* Report that the "barrier" has been destroyed. */
+ #define ANNOTATE_BARRIER_DESTROY(barrier) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(__FILE__, __LINE__, \
+ barrier)
+
+ /* -------------------------------------------------------------
+ Annotations useful for testing race detectors. */
+
+ /* Report that we expect a race on the variable at "address".
+ Use only in unit tests for a race detector. */
+ #define ANNOTATE_EXPECT_RACE(address, description) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(__FILE__, __LINE__, address, \
+ description)
+
+ #define ANNOTATE_FLUSH_EXPECTED_RACES() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(__FILE__, __LINE__)
+
+ /* A no-op. Insert where you like to test the interceptors. */
+ #define ANNOTATE_NO_OP(arg) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(__FILE__, __LINE__, arg)
+
+ /* Force the race detector to flush its state. The actual effect depends on
+ * the implementation of the detector. */
+ #define ANNOTATE_FLUSH_STATE() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(__FILE__, __LINE__)
+
+
+#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+ #define ANNOTATE_RWLOCK_CREATE(lock) /* empty */
+ #define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
+ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
+ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
+ #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */
+ #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */
+ #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */
+ #define ANNOTATE_BARRIER_DESTROY(barrier) /* empty */
+ #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */
+ #define ANNOTATE_CONDVAR_WAIT(cv) /* empty */
+ #define ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */
+ #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */
+ #define ANNOTATE_HAPPENS_BEFORE(obj) /* empty */
+ #define ANNOTATE_HAPPENS_AFTER(obj) /* empty */
+ #define ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */
+ #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size) /* empty */
+ #define ANNOTATE_SWAP_MEMORY_RANGE(address, size) /* empty */
+ #define ANNOTATE_PCQ_CREATE(pcq) /* empty */
+ #define ANNOTATE_PCQ_DESTROY(pcq) /* empty */
+ #define ANNOTATE_PCQ_PUT(pcq) /* empty */
+ #define ANNOTATE_PCQ_GET(pcq) /* empty */
+ #define ANNOTATE_NEW_MEMORY(address, size) /* empty */
+ #define ANNOTATE_EXPECT_RACE(address, description) /* empty */
+  /* Takes no arguments, matching the enabled-branch definition of
+     ANNOTATE_FLUSH_EXPECTED_RACES() above; with the previous two-argument
+     form, a zero-argument call site would fail to compile whenever
+     DYNAMIC_ANNOTATIONS_ENABLED == 0. */
+  #define ANNOTATE_FLUSH_EXPECTED_RACES() /* empty */
+ #define ANNOTATE_BENIGN_RACE(address, description) /* empty */
+ #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
+ #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */
+ #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */
+ #define ANNOTATE_TRACE_MEMORY(arg) /* empty */
+ #define ANNOTATE_THREAD_NAME(name) /* empty */
+ #define ANNOTATE_IGNORE_READS_BEGIN() /* empty */
+ #define ANNOTATE_IGNORE_READS_END() /* empty */
+ #define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */
+ #define ANNOTATE_IGNORE_WRITES_END() /* empty */
+ #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */
+ #define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */
+ #define ANNOTATE_IGNORE_SYNC_BEGIN() /* empty */
+ #define ANNOTATE_IGNORE_SYNC_END() /* empty */
+ #define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
+ #define ANNOTATE_NO_OP(arg) /* empty */
+ #define ANNOTATE_FLUSH_STATE() /* empty */
+
+#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+/* Use the macros above rather than using these functions directly. */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(
+ const char *file, int line,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(
+ const char *file, int line,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(
+ const char *file, int line,
+ const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(
+ const char *file, int line,
+ const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(
+ const char *file, int line, const volatile void *barrier, long count,
+ long reinitialization_allowed) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(
+ const char *file, int line,
+ const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(
+ const char *file, int line,
+ const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(
+ const char *file, int line,
+ const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(
+ const char *file, int line, const volatile void *cv,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(
+ const char *file, int line,
+ const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(
+ const char *file, int line,
+ const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(
+ const char *file, int line,
+ const volatile void *obj) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(
+ const char *file, int line,
+ const volatile void *obj) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(
+ const char *file, int line,
+ const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(
+ const char *file, int line,
+ const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(
+ const char *file, int line,
+ const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(
+ const char *file, int line,
+ const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(
+ const char *file, int line,
+ const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(
+ const char *file, int line,
+ const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(
+ const char *file, int line,
+ const volatile void *mem, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(
+ const char *file, int line, const volatile void *mem,
+ const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRace)(
+ const char *file, int line, const volatile void *mem,
+ const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(
+ const char *file, int line, const volatile void *mem, long size,
+ const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(
+ const char *file, int line,
+ const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(
+ const char *file, int line,
+ const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(
+ const char *file, int line,
+ const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(
+ const char *file, int line,
+ const char *name) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(
+ const char *file, int line, int enable) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(
+ const char *file, int line,
+ const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+
+#if DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1
+/* Return non-zero value if running under valgrind.
+
+ If "valgrind.h" is included into dynamic_annotations.c,
+ the regular valgrind mechanism will be used.
+ See http://valgrind.org/docs/manual/manual-core-adv.html about
+ RUNNING_ON_VALGRIND and other valgrind "client requests".
+ The file "valgrind.h" may be obtained by doing
+ svn co svn://svn.valgrind.org/valgrind/trunk/include
+
+ If for some reason you can't use "valgrind.h" or want to fake valgrind,
+ there are two ways to make this function return non-zero:
+ - Use environment variable: export RUNNING_ON_VALGRIND=1
+ - Make your tool intercept the function RunningOnValgrind() and
+ change its return value.
+ */
+int RunningOnValgrind(void) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+#endif /* DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
+
+ /* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+
+ Instead of doing
+ ANNOTATE_IGNORE_READS_BEGIN();
+ ... = x;
+ ANNOTATE_IGNORE_READS_END();
+ one can use
+ ... = ANNOTATE_UNPROTECTED_READ(x); */
+  // Performs the read of x with race-detector read-checking suppressed
+  // around the load only; the suppression window is restored immediately,
+  // so surrounding reads are still checked.
+  template <class T>
+  inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) {
+    ANNOTATE_IGNORE_READS_BEGIN();
+    T res = x;
+    ANNOTATE_IGNORE_READS_END();
+    return res;
+  }
+ /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
+ #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
+ namespace { \
+ class static_var ## _annotator { \
+ public: \
+ static_var ## _annotator() { \
+ ANNOTATE_BENIGN_RACE_SIZED(&static_var, \
+ sizeof(static_var), \
+ # static_var ": " description); \
+ } \
+ }; \
+ static static_var ## _annotator the ## static_var ## _annotator;\
+ }
+#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+ #define ANNOTATE_UNPROTECTED_READ(x) (x)
+ #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */
+
+#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+#endif /* __DYNAMIC_ANNOTATIONS_H__ */
diff --git a/security/sandbox/chromium/base/third_party/icu/LICENSE b/security/sandbox/chromium/base/third_party/icu/LICENSE
new file mode 100644
index 0000000000..2882e4ebda
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/icu/LICENSE
@@ -0,0 +1,76 @@
+COPYRIGHT AND PERMISSION NOTICE (ICU 58 and later)
+
+Copyright © 1991-2017 Unicode, Inc. All rights reserved.
+Distributed under the Terms of Use in http://www.unicode.org/copyright.html
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Unicode data files and any associated documentation
+(the "Data Files") or Unicode software and any associated documentation
+(the "Software") to deal in the Data Files or Software
+without restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, and/or sell copies of
+the Data Files or Software, and to permit persons to whom the Data Files
+or Software are furnished to do so, provided that either
+(a) this copyright and permission notice appear with all copies
+of the Data Files or Software, or
+(b) this copyright and permission notice appear in associated
+Documentation.
+
+THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
+NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
+DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder
+shall not be used in advertising or otherwise to promote the sale,
+use or other dealings in these Data Files or Software without prior
+written authorization of the copyright holder.
+
+---------------------
+
+Third-Party Software Licenses
+
+This section contains third-party software notices and/or additional
+terms for licensed third-party software components included within ICU
+libraries.
+
+1. ICU License - ICU 1.8.1 to ICU 57.1
+
+COPYRIGHT AND PERMISSION NOTICE
+
+Copyright (c) 1995-2016 International Business Machines Corporation and others
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, and/or sell copies of the Software, and to permit persons
+to whom the Software is furnished to do so, provided that the above
+copyright notice(s) and this permission notice appear in all copies of
+the Software and that both the above copyright notice(s) and this
+permission notice appear in supporting documentation.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY
+SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER
+RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
+CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder
+shall not be used in advertising or otherwise to promote the sale, use
+or other dealings in this Software without prior written authorization
+of the copyright holder.
+
+All trademarks and registered trademarks mentioned herein are the
+property of their respective owners.
diff --git a/security/sandbox/chromium/base/third_party/icu/icu_utf.cc b/security/sandbox/chromium/base/third_party/icu/icu_utf.cc
new file mode 100644
index 0000000000..a3262b04d3
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/icu/icu_utf.cc
@@ -0,0 +1,131 @@
+// © 2016 and later: Unicode, Inc. and others.
+// License & terms of use: http://www.unicode.org/copyright.html
+/*
+******************************************************************************
+*
+* Copyright (C) 1999-2012, International Business Machines
+* Corporation and others. All Rights Reserved.
+*
+******************************************************************************
+* file name: utf_impl.cpp
+* encoding: UTF-8
+* tab size: 8 (not used)
+* indentation:4
+*
+* created on: 1999sep13
+* created by: Markus W. Scherer
+*
+* This file provides implementation functions for macros in the utfXX.h
+* that would otherwise be too long as macros.
+*/
+
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base_icu {
+
+// source/common/utf_impl.cpp
+
+static const UChar32
+utf8_errorValue[6]={
+ // Same values as UTF8_ERROR_VALUE_1, UTF8_ERROR_VALUE_2, UTF_ERROR_VALUE,
+ // but without relying on the obsolete unicode/utf_old.h.
+ 0x15, 0x9f, 0xffff,
+ 0x10ffff
+};
+
+// Maps an illegal/truncated UTF-8 sequence to the substitute code point
+// selected by "strict":
+//   strict >= 0  -> historical per-length error value from utf8_errorValue
+//                   ("count" indexes by the number of trail bytes consumed);
+//   strict == -3 -> U+FFFD (REPLACEMENT CHARACTER);
+//   otherwise    -> CBU_SENTINEL (-1).
+static UChar32
+errorValue(int32_t count, int8_t strict) {
+    if(strict>=0) {
+        return utf8_errorValue[count];
+    } else if(strict==-3) {
+        return 0xfffd;
+    } else {
+        return CBU_SENTINEL;
+    }
+}
+
+/*
+ * Handle the non-inline part of the U8_NEXT() and U8_NEXT_FFFD() macros
+ * and their obsolete sibling UTF8_NEXT_CHAR_SAFE().
+ *
+ * U8_NEXT() supports NUL-terminated strings indicated via length<0.
+ *
+ * The "strict" parameter controls the error behavior:
+ * <0 "Safe" behavior of U8_NEXT():
+ * -1: All illegal byte sequences yield U_SENTINEL=-1.
+ * -2: Same as -1, except for lenient treatment of surrogate code points as legal.
+ * Some implementations use this for roundtripping of
+ * Unicode 16-bit strings that are not well-formed UTF-16, that is, they
+ * contain unpaired surrogates.
+ * -3: All illegal byte sequences yield U+FFFD.
+ * 0 Obsolete "safe" behavior of UTF8_NEXT_CHAR_SAFE(..., FALSE):
+ * All illegal byte sequences yield a positive code point such that this
+ * result code point would be encoded with the same number of bytes as
+ * the illegal sequence.
+ * >0 Obsolete "strict" behavior of UTF8_NEXT_CHAR_SAFE(..., TRUE):
+ * Same as the obsolete "safe" behavior, but non-characters are also treated
+ * like illegal sequences.
+ *
+ * Note that a UBool is the same as an int8_t.
+ */
+// Decodes the remainder of a multi-byte UTF-8 sequence whose lead byte c has
+// already been read (see the "strict" mode table in the block comment above).
+// On success advances *pi past the sequence and returns the code point; on an
+// illegal sequence leaves *pi at the first byte not consumed and returns
+// errorValue(). NOTE: the ++i side effects inside the && chains below are
+// order-dependent — each trail byte is bounds-checked before it is read.
+UChar32
+utf8_nextCharSafeBody(const uint8_t *s, int32_t *pi, int32_t length, UChar32 c, UBool strict) {
+    // *pi is one after byte c.
+    int32_t i=*pi;
+    // length can be negative for NUL-terminated strings: Read and validate one byte at a time.
+    if(i==length || c>0xf4) {
+        // end of string, or not a lead byte
+    } else if(c>=0xf0) {
+        // Test for 4-byte sequences first because
+        // U8_NEXT() handles shorter valid sequences inline.
+        uint8_t t1=s[i], t2, t3;
+        c&=7;
+        // (s[i]-0x80)<=0x3f is true exactly for trail bytes 0x80..0xBF.
+        if(CBU8_IS_VALID_LEAD4_AND_T1(c, t1) &&
+            ++i!=length && (t2=s[i]-0x80)<=0x3f &&
+            ++i!=length && (t3=s[i]-0x80)<=0x3f) {
+            ++i;
+            c=(c<<18)|((t1&0x3f)<<12)|(t2<<6)|t3;
+            // strict: forbid non-characters like U+fffe
+            if(strict<=0 || !CBU_IS_UNICODE_NONCHAR(c)) {
+                *pi=i;
+                return c;
+            }
+        }
+    } else if(c>=0xe0) {
+        // 3-byte sequences (lead byte E0..EF).
+        c&=0xf;
+        if(strict!=-2) {
+            uint8_t t1=s[i], t2;
+            if(CBU8_IS_VALID_LEAD3_AND_T1(c, t1) &&
+                ++i!=length && (t2=s[i]-0x80)<=0x3f) {
+                ++i;
+                c=(c<<12)|((t1&0x3f)<<6)|t2;
+                // strict: forbid non-characters like U+fffe
+                if(strict<=0 || !CBU_IS_UNICODE_NONCHAR(c)) {
+                    *pi=i;
+                    return c;
+                }
+            }
+        } else {
+            // strict=-2 -> lenient: allow surrogates
+            uint8_t t1=s[i]-0x80, t2;
+            if(t1<=0x3f && (c>0 || t1>=0x20) &&
+                ++i!=length && (t2=s[i]-0x80)<=0x3f) {
+                *pi=i+1;
+                return (c<<12)|(t1<<6)|t2;
+            }
+        }
+    } else if(c>=0xc2) {
+        // 2-byte sequences (lead byte C2..DF; C0/C1 are excluded because they
+        // could only encode overlong forms of U+0000..U+007F).
+        uint8_t t1=s[i]-0x80;
+        if(t1<=0x3f) {
+            *pi=i+1;
+            return ((c-0xc0)<<6)|t1;
+        }
+    } // else 0x80<=c<0xc2 is not a lead byte
+
+    /* error handling */
+    c=errorValue(i-*pi, strict);
+    *pi=i;
+    return c;
+}
+
+} // namespace base_icu
diff --git a/security/sandbox/chromium/base/third_party/icu/icu_utf.h b/security/sandbox/chromium/base/third_party/icu/icu_utf.h
new file mode 100644
index 0000000000..2ba82316c2
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/icu/icu_utf.h
@@ -0,0 +1,442 @@
+// © 2016 and later: Unicode, Inc. and others.
+// License & terms of use: http://www.unicode.org/copyright.html
+/*
+******************************************************************************
+*
+* Copyright (C) 1999-2015, International Business Machines
+* Corporation and others. All Rights Reserved.
+*
+******************************************************************************
+*/
+
+#ifndef BASE_THIRD_PARTY_ICU_ICU_UTF_H_
+#define BASE_THIRD_PARTY_ICU_ICU_UTF_H_
+
+#include <stdint.h>
+
+namespace base_icu {
+
+// source/common/unicode/umachine.h
+
+/** The ICU boolean type @stable ICU 2.0 */
+typedef int8_t UBool;
+
+/**
+ * Define UChar32 as a type for single Unicode code points.
+ * UChar32 is a signed 32-bit integer (same as int32_t).
+ *
+ * The Unicode code point range is 0..0x10ffff.
+ * All other values (negative or >=0x110000) are illegal as Unicode code points.
+ * They may be used as sentinel values to indicate "done", "error"
+ * or similar non-code point conditions.
+ *
+ * Before ICU 2.4 (Jitterbug 2146), UChar32 was defined
+ * to be wchar_t if that is 32 bits wide (wchar_t may be signed or unsigned)
+ * or else to be uint32_t.
+ * That is, the definition of UChar32 was platform-dependent.
+ *
+ * @see U_SENTINEL
+ * @stable ICU 2.4
+ */
+typedef int32_t UChar32;
+
+/**
+ * This value is intended for sentinel values for APIs that
+ * (take or) return single code points (UChar32).
+ * It is outside of the Unicode code point range 0..0x10ffff.
+ *
+ * For example, a "done" or "error" value in a new API
+ * could be indicated with U_SENTINEL.
+ *
+ * ICU APIs designed before ICU 2.4 usually define service-specific "done"
+ * values, mostly 0xffff.
+ * Those may need to be distinguished from
+ * actual U+ffff text contents by calling functions like
+ * CharacterIterator::hasNext() or UnicodeString::length().
+ *
+ * @return -1
+ * @see UChar32
+ * @stable ICU 2.4
+ */
+#define CBU_SENTINEL (-1)
+
+// source/common/unicode/utf.h
+
+/**
+ * Is this code point a Unicode noncharacter?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_UNICODE_NONCHAR(c) \
+ ((c)>=0xfdd0 && \
+ ((c)<=0xfdef || ((c)&0xfffe)==0xfffe) && (c)<=0x10ffff)
+
+/**
+ * Is c a Unicode code point value (0..U+10ffff)
+ * that can be assigned a character?
+ *
+ * Code points that are not characters include:
+ * - single surrogate code points (U+d800..U+dfff, 2048 code points)
+ * - the last two code points on each plane (U+__fffe and U+__ffff, 34 code points)
+ * - U+fdd0..U+fdef (new with Unicode 3.1, 32 code points)
+ * - the highest Unicode code point value is U+10ffff
+ *
+ * This means that all code points below U+d800 are character code points,
+ * and that boundary is tested first for performance.
+ *
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_UNICODE_CHAR(c) \
+ ((uint32_t)(c)<0xd800 || \
+ (0xdfff<(c) && (c)<=0x10ffff && !CBU_IS_UNICODE_NONCHAR(c)))
+
+/**
+ * Is this code point a surrogate (U+d800..U+dfff)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_SURROGATE(c) (((c)&0xfffff800)==0xd800)
+
+/**
+ * Assuming c is a surrogate code point (U_IS_SURROGATE(c)),
+ * is it a lead surrogate?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_SURROGATE_LEAD(c) (((c)&0x400)==0)
+
+// source/common/unicode/utf8.h
+
+/**
+ * Internal bit vector for 3-byte UTF-8 validity check, for use in U8_IS_VALID_LEAD3_AND_T1.
+ * Each bit indicates whether one lead byte + first trail byte pair starts a valid sequence.
+ * Lead byte E0..EF bits 3..0 are used as byte index,
+ * first trail byte bits 7..5 are used as bit index into that byte.
+ * @see U8_IS_VALID_LEAD3_AND_T1
+ * @internal
+ */
+#define CBU8_LEAD3_T1_BITS "\x20\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x10\x30\x30"
+
+/**
+ * Internal 3-byte UTF-8 validity check.
+ * Non-zero if lead byte E0..EF and first trail byte 00..FF start a valid sequence.
+ * @internal
+ */
+#define CBU8_IS_VALID_LEAD3_AND_T1(lead, t1) (CBU8_LEAD3_T1_BITS[(lead)&0xf]&(1<<((uint8_t)(t1)>>5)))
+
+/**
+ * Internal bit vector for 4-byte UTF-8 validity check, for use in U8_IS_VALID_LEAD4_AND_T1.
+ * Each bit indicates whether one lead byte + first trail byte pair starts a valid sequence.
+ * First trail byte bits 7..4 are used as byte index,
+ * lead byte F0..F4 bits 2..0 are used as bit index into that byte.
+ * @see U8_IS_VALID_LEAD4_AND_T1
+ * @internal
+ */
+#define CBU8_LEAD4_T1_BITS "\x00\x00\x00\x00\x00\x00\x00\x00\x1E\x0F\x0F\x0F\x00\x00\x00\x00"
+
+/**
+ * Internal 4-byte UTF-8 validity check.
+ * Non-zero if lead byte F0..F4 and first trail byte 00..FF start a valid sequence.
+ * @internal
+ */
+#define CBU8_IS_VALID_LEAD4_AND_T1(lead, t1) (CBU8_LEAD4_T1_BITS[(uint8_t)(t1)>>4]&(1<<((lead)&7)))
+
+/**
+ * Function for handling "next code point" with error-checking.
+ *
+ * This is internal since it is not meant to be called directly by external clients;
+ * however it is U_STABLE (not U_INTERNAL) since it is called by public macros in this
+ * file and thus must remain stable, and should not be hidden when other internal
+ * functions are hidden (otherwise public macros would fail to compile).
+ * @internal
+ */
+UChar32
+utf8_nextCharSafeBody(const uint8_t *s, int32_t *pi, int32_t length, ::base_icu::UChar32 c, ::base_icu::UBool strict);
+
+/**
+ * Does this code unit (byte) encode a code point by itself (US-ASCII 0..0x7f)?
+ * @param c 8-bit code unit (byte)
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU8_IS_SINGLE(c) (((c)&0x80)==0)
+
+/**
+ * Is this code unit (byte) a UTF-8 lead byte? (0xC2..0xF4)
+ * @param c 8-bit code unit (byte)
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU8_IS_LEAD(c) ((uint8_t)((c)-0xc2)<=0x32)
+
+/**
+ * Is this code unit (byte) a UTF-8 trail byte? (0x80..0xBF)
+ * @param c 8-bit code unit (byte)
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU8_IS_TRAIL(c) ((int8_t)(c)<-0x40)
+
+/**
+ * How many code units (bytes) are used for the UTF-8 encoding
+ * of this Unicode code point?
+ * @param c 32-bit code point
+ * @return 1..4, or 0 if c is a surrogate or not a Unicode code point
+ * @stable ICU 2.4
+ */
+#define CBU8_LENGTH(c) \
+ ((uint32_t)(c)<=0x7f ? 1 : \
+ ((uint32_t)(c)<=0x7ff ? 2 : \
+ ((uint32_t)(c)<=0xd7ff ? 3 : \
+ ((uint32_t)(c)<=0xdfff || (uint32_t)(c)>0x10ffff ? 0 : \
+ ((uint32_t)(c)<=0xffff ? 3 : 4)\
+ ) \
+ ) \
+ ) \
+ )
+
+/**
+ * The maximum number of UTF-8 code units (bytes) per Unicode code point (U+0000..U+10ffff).
+ * @return 4
+ * @stable ICU 2.4
+ */
+#define CBU8_MAX_LENGTH 4
+
+/**
+ * Get a code point from a string at a code point boundary offset,
+ * and advance the offset to the next code point boundary.
+ * (Post-incrementing forward iteration.)
+ * "Safe" macro, checks for illegal sequences and for string boundaries.
+ *
+ * The length can be negative for a NUL-terminated string.
+ *
+ * The offset may point to the lead byte of a multi-byte sequence,
+ * in which case the macro will read the whole sequence.
+ * If the offset points to a trail byte or an illegal UTF-8 sequence, then
+ * c is set to a negative value.
+ *
+ * @param s const uint8_t * string
+ * @param i int32_t string offset, must be i<length
+ * @param length int32_t string length
+ * @param c output UChar32 variable, set to <0 in case of an error
+ * @see U8_NEXT_UNSAFE
+ * @stable ICU 2.4
+ */
+#define CBU8_NEXT(s, i, length, c) { \
+ (c)=(uint8_t)(s)[(i)++]; \
+ if(!CBU8_IS_SINGLE(c)) { \
+ uint8_t __t1, __t2; \
+ if( /* handle U+0800..U+FFFF inline */ \
+ (0xe0<=(c) && (c)<0xf0) && \
+ (((i)+1)<(length) || (length)<0) && \
+ CBU8_IS_VALID_LEAD3_AND_T1((c), __t1=(s)[i]) && \
+ (__t2=(s)[(i)+1]-0x80)<=0x3f) { \
+ (c)=(((c)&0xf)<<12)|((__t1&0x3f)<<6)|__t2; \
+ (i)+=2; \
+ } else if( /* handle U+0080..U+07FF inline */ \
+ ((c)<0xe0 && (c)>=0xc2) && \
+ ((i)!=(length)) && \
+ (__t1=(s)[i]-0x80)<=0x3f) { \
+ (c)=(((c)&0x1f)<<6)|__t1; \
+ ++(i); \
+ } else { \
+ /* function call for "complicated" and error cases */ \
+ (c)=::base_icu::utf8_nextCharSafeBody((const uint8_t *)s, &(i), (length), c, -1); \
+ } \
+ } \
+}
+
+/**
+ * Append a code point to a string, overwriting 1 to 4 bytes.
+ * The offset points to the current end of the string contents
+ * and is advanced (post-increment).
+ * "Unsafe" macro, assumes a valid code point and sufficient space in the string.
+ * Otherwise, the result is undefined.
+ *
+ * @param s const uint8_t * string buffer
+ * @param i string offset
+ * @param c code point to append
+ * @see U8_APPEND
+ * @stable ICU 2.4
+ */
+#define CBU8_APPEND_UNSAFE(s, i, c) { \
+ if((uint32_t)(c)<=0x7f) { \
+ (s)[(i)++]=(uint8_t)(c); \
+ } else { \
+ if((uint32_t)(c)<=0x7ff) { \
+ (s)[(i)++]=(uint8_t)(((c)>>6)|0xc0); \
+ } else { \
+ if((uint32_t)(c)<=0xffff) { \
+ (s)[(i)++]=(uint8_t)(((c)>>12)|0xe0); \
+ } else { \
+ (s)[(i)++]=(uint8_t)(((c)>>18)|0xf0); \
+ (s)[(i)++]=(uint8_t)((((c)>>12)&0x3f)|0x80); \
+ } \
+ (s)[(i)++]=(uint8_t)((((c)>>6)&0x3f)|0x80); \
+ } \
+ (s)[(i)++]=(uint8_t)(((c)&0x3f)|0x80); \
+ } \
+}
+
+// source/common/unicode/utf16.h
+
+/**
+ * Does this code unit alone encode a code point (BMP, not a surrogate)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_SINGLE(c) !CBU_IS_SURROGATE(c)
+
+/**
+ * Is this code unit a lead surrogate (U+d800..U+dbff)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_LEAD(c) (((c)&0xfffffc00)==0xd800)
+
+/**
+ * Is this code unit a trail surrogate (U+dc00..U+dfff)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_TRAIL(c) (((c)&0xfffffc00)==0xdc00)
+
+/**
+ * Is this code unit a surrogate (U+d800..U+dfff)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_SURROGATE(c) CBU_IS_SURROGATE(c)
+
+/**
+ * Assuming c is a surrogate code point (U16_IS_SURROGATE(c)),
+ * is it a lead surrogate?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_SURROGATE_LEAD(c) (((c)&0x400)==0)
+
+/**
+ * Helper constant for U16_GET_SUPPLEMENTARY.
+ * @internal
+ */
+#define CBU16_SURROGATE_OFFSET ((0xd800<<10UL)+0xdc00-0x10000)
+
+/**
+ * Get a supplementary code point value (U+10000..U+10ffff)
+ * from its lead and trail surrogates.
+ * The result is undefined if the input values are not
+ * lead and trail surrogates.
+ *
+ * @param lead lead surrogate (U+d800..U+dbff)
+ * @param trail trail surrogate (U+dc00..U+dfff)
+ * @return supplementary code point (U+10000..U+10ffff)
+ * @stable ICU 2.4
+ */
+#define CBU16_GET_SUPPLEMENTARY(lead, trail) \
+ (((::base_icu::UChar32)(lead)<<10UL)+(::base_icu::UChar32)(trail)-CBU16_SURROGATE_OFFSET)
+
+/**
+ * Get the lead surrogate (0xd800..0xdbff) for a
+ * supplementary code point (0x10000..0x10ffff).
+ * @param supplementary 32-bit code point (U+10000..U+10ffff)
+ * @return lead surrogate (U+d800..U+dbff) for supplementary
+ * @stable ICU 2.4
+ */
+#define CBU16_LEAD(supplementary) (::base_icu::UChar)(((supplementary)>>10)+0xd7c0)
+
+/**
+ * Get the trail surrogate (0xdc00..0xdfff) for a
+ * supplementary code point (0x10000..0x10ffff).
+ * @param supplementary 32-bit code point (U+10000..U+10ffff)
+ * @return trail surrogate (U+dc00..U+dfff) for supplementary
+ * @stable ICU 2.4
+ */
+#define CBU16_TRAIL(supplementary) (::base_icu::UChar)(((supplementary)&0x3ff)|0xdc00)
+
+/**
+ * How many 16-bit code units are used to encode this Unicode code point? (1 or 2)
+ * The result is not defined if c is not a Unicode code point (U+0000..U+10ffff).
+ * @param c 32-bit code point
+ * @return 1 or 2
+ * @stable ICU 2.4
+ */
+#define CBU16_LENGTH(c) ((uint32_t)(c)<=0xffff ? 1 : 2)
+
+/**
+ * The maximum number of 16-bit code units per Unicode code point (U+0000..U+10ffff).
+ * @return 2
+ * @stable ICU 2.4
+ */
+#define CBU16_MAX_LENGTH 2
+
+/**
+ * Get a code point from a string at a code point boundary offset,
+ * and advance the offset to the next code point boundary.
+ * (Post-incrementing forward iteration.)
+ * "Safe" macro, handles unpaired surrogates and checks for string boundaries.
+ *
+ * The length can be negative for a NUL-terminated string.
+ *
+ * The offset may point to the lead surrogate unit
+ * for a supplementary code point, in which case the macro will read
+ * the following trail surrogate as well.
+ * If the offset points to a trail surrogate or
+ * to a single, unpaired lead surrogate, then c is set to that unpaired surrogate.
+ *
+ * @param s const UChar * string
+ * @param i string offset, must be i<length
+ * @param length string length
+ * @param c output UChar32 variable
+ * @see U16_NEXT_UNSAFE
+ * @stable ICU 2.4
+ */
+#define CBU16_NEXT(s, i, length, c) { \
+ (c)=(s)[(i)++]; \
+ if(CBU16_IS_LEAD(c)) { \
+ uint16_t __c2; \
+ if((i)!=(length) && CBU16_IS_TRAIL(__c2=(s)[(i)])) { \
+ ++(i); \
+ (c)=CBU16_GET_SUPPLEMENTARY((c), __c2); \
+ } \
+ } \
+}
+
+/**
+ * Append a code point to a string, overwriting 1 or 2 code units.
+ * The offset points to the current end of the string contents
+ * and is advanced (post-increment).
+ * "Unsafe" macro, assumes a valid code point and sufficient space in the string.
+ * Otherwise, the result is undefined.
+ *
+ * @param s const UChar * string buffer
+ * @param i string offset
+ * @param c code point to append
+ * @see U16_APPEND
+ * @stable ICU 2.4
+ */
+#define CBU16_APPEND_UNSAFE(s, i, c) { \
+ if((uint32_t)(c)<=0xffff) { \
+ (s)[(i)++]=(uint16_t)(c); \
+ } else { \
+ (s)[(i)++]=(uint16_t)(((c)>>10)+0xd7c0); \
+ (s)[(i)++]=(uint16_t)(((c)&0x3ff)|0xdc00); \
+ } \
+}
+
+} // namespace base_icu
+
+#endif // BASE_THIRD_PARTY_ICU_ICU_UTF_H_
diff --git a/security/sandbox/chromium/base/third_party/superfasthash/LICENSE b/security/sandbox/chromium/base/third_party/superfasthash/LICENSE
new file mode 100644
index 0000000000..3c40a3ecd7
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/superfasthash/LICENSE
@@ -0,0 +1,27 @@
+Paul Hsieh OLD BSD license
+
+Copyright (c) 2010, Paul Hsieh
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+* Neither my name, Paul Hsieh, nor the names of any other contributors to the
+ code use may not be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/security/sandbox/chromium/base/third_party/superfasthash/README.chromium b/security/sandbox/chromium/base/third_party/superfasthash/README.chromium
new file mode 100644
index 0000000000..d41ed7724a
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/superfasthash/README.chromium
@@ -0,0 +1,29 @@
+Name: Paul Hsieh's SuperFastHash
+Short Name: SuperFastHash
+URL: http://www.azillionmonkeys.com/qed/hash.html
+Version: 0
+Date: 2012-02-21
+License: BSD
+License File: LICENSE
+Security Critical: yes
+
+Description:
+A fast string hashing algorithm.
+
+Local Modifications:
+- Added LICENSE.
+- Added license text as a comment to the top of superfasthash.c.
+- #include <stdint.h> instead of "pstdint.h".
+- #include <stdlib.h>.
+
+The license is a standard 3-clause BSD license with the following minor changes:
+
+"nor the names of its contributors may be used"
+is replaced with:
+"nor the names of any other contributors to the code use may not be used"
+
+and
+
+"IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE"
+is replaced with:
+"IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE"
diff --git a/security/sandbox/chromium/base/third_party/superfasthash/superfasthash.c b/security/sandbox/chromium/base/third_party/superfasthash/superfasthash.c
new file mode 100644
index 0000000000..6e7687e131
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/superfasthash/superfasthash.c
@@ -0,0 +1,84 @@
+// Copyright (c) 2010, Paul Hsieh
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither my name, Paul Hsieh, nor the names of any other contributors to the
+// code use may not be used to endorse or promote products derived from this
+// software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdint.h>
+#include <stdlib.h>
+#undef get16bits
+#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
+ || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
+#define get16bits(d) (*((const uint16_t *) (d)))
+#endif
+
+#if !defined (get16bits)
+#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
+ +(uint32_t)(((const uint8_t *)(d))[0]) )
+#endif
+
+uint32_t SuperFastHash (const char * data, int len) {
+uint32_t hash = len, tmp;
+int rem;
+
+ if (len <= 0 || data == NULL) return 0;
+
+ rem = len & 3;
+ len >>= 2;
+
+ /* Main loop */
+ for (;len > 0; len--) {
+ hash += get16bits (data);
+ tmp = (get16bits (data+2) << 11) ^ hash;
+ hash = (hash << 16) ^ tmp;
+ data += 2*sizeof (uint16_t);
+ hash += hash >> 11;
+ }
+
+ /* Handle end cases */
+ switch (rem) {
+ case 3: hash += get16bits (data);
+ hash ^= hash << 16;
+ hash ^= ((signed char)data[sizeof (uint16_t)]) << 18;
+ hash += hash >> 11;
+ break;
+ case 2: hash += get16bits (data);
+ hash ^= hash << 11;
+ hash += hash >> 17;
+ break;
+ case 1: hash += (signed char)*data;
+ hash ^= hash << 10;
+ hash += hash >> 1;
+ }
+
+ /* Force "avalanching" of final 127 bits */
+ hash ^= hash << 3;
+ hash += hash >> 5;
+ hash ^= hash << 4;
+ hash += hash >> 17;
+ hash ^= hash << 25;
+ hash += hash >> 6;
+
+ return hash;
+}
diff --git a/security/sandbox/chromium/base/third_party/valgrind/LICENSE b/security/sandbox/chromium/base/third_party/valgrind/LICENSE
new file mode 100644
index 0000000000..41f677bd17
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/valgrind/LICENSE
@@ -0,0 +1,39 @@
+ Notice that the following BSD-style license applies to the Valgrind header
+ files used by Chromium (valgrind.h and memcheck.h). However, the rest of
+ Valgrind is licensed under the terms of the GNU General Public License,
+ version 2, unless otherwise indicated.
+
+ ----------------------------------------------------------------
+
+ Copyright (C) 2000-2008 Julian Seward. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+ 3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/security/sandbox/chromium/base/third_party/valgrind/valgrind.h b/security/sandbox/chromium/base/third_party/valgrind/valgrind.h
new file mode 100644
index 0000000000..0bae0aa130
--- /dev/null
+++ b/security/sandbox/chromium/base/third_party/valgrind/valgrind.h
@@ -0,0 +1,4792 @@
+/* -*- c -*-
+ ----------------------------------------------------------------
+
+ Notice that the following BSD-style license applies to this one
+ file (valgrind.h) only. The rest of Valgrind is licensed under the
+ terms of the GNU General Public License, version 2, unless
+ otherwise indicated. See the COPYING file in the source
+ distribution for details.
+
+ ----------------------------------------------------------------
+
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2010 Julian Seward. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+ 3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----------------------------------------------------------------
+
+ Notice that the above BSD-style license applies to this one file
+ (valgrind.h) only. The entire rest of Valgrind is licensed under
+ the terms of the GNU General Public License, version 2. See the
+ COPYING file in the source distribution for details.
+
+ ----------------------------------------------------------------
+*/
+
+
+/* This file is for inclusion into client (your!) code.
+
+ You can use these macros to manipulate and query Valgrind's
+ execution inside your own programs.
+
+ The resulting executables will still run without Valgrind, just a
+ little bit more slowly than they otherwise would, but otherwise
+ unchanged. When not running on valgrind, each client request
+ consumes very few (eg. 7) instructions, so the resulting performance
+ loss is negligible unless you plan to execute client requests
+ millions of times per second. Nevertheless, if that is still a
+ problem, you can compile with the NVALGRIND symbol defined (gcc
+ -DNVALGRIND) so that client requests are not even compiled in. */
+
+#ifndef __VALGRIND_H
+#define __VALGRIND_H
+
+
+/* ------------------------------------------------------------------ */
+/* VERSION NUMBER OF VALGRIND */
+/* ------------------------------------------------------------------ */
+
+/* Specify Valgrind's version number, so that user code can
+ conditionally compile based on our version number. Note that these
+ were introduced at version 3.6 and so do not exist in version 3.5
+ or earlier. The recommended way to use them to check for "version
+ X.Y or later" is (eg)
+
+#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
+ && (__VALGRIND_MAJOR__ > 3 \
+ || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
+*/
+#define __VALGRIND_MAJOR__ 3
+#define __VALGRIND_MINOR__ 6
+
+
+#include <stdarg.h>
+
+/* Nb: this file might be included in a file compiled with -ansi. So
+ we can't use C++ style "//" comments nor the "asm" keyword (instead
+ use "__asm__"). */
+
+/* Derive some tags indicating what the target platform is. Note
+ that in this file we're using the compiler's CPP symbols for
+ identifying architectures, which are different to the ones we use
+ within the rest of Valgrind. Note, __powerpc__ is active for both
+ 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
+ latter (on Linux, that is).
+
+ Misc note: how to find out what's predefined in gcc by default:
+ gcc -Wp,-dM somefile.c
+*/
+#undef PLAT_ppc64_aix5
+#undef PLAT_ppc32_aix5
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
+
+#if defined(_AIX) && defined(__64BIT__)
+# define PLAT_ppc64_aix5 1
+#elif defined(_AIX) && !defined(__64BIT__)
+# define PLAT_ppc32_aix5 1
+#elif defined(__APPLE__) && defined(__i386__)
+# define PLAT_x86_darwin 1
+#elif defined(__APPLE__) && defined(__x86_64__)
+# define PLAT_amd64_darwin 1
+#elif defined(__MINGW32__) || defined(__CYGWIN32__) || defined(_WIN32) && defined(_M_IX86)
+# define PLAT_x86_win32 1
+#elif defined(__linux__) && defined(__i386__)
+# define PLAT_x86_linux 1
+#elif defined(__linux__) && defined(__x86_64__)
+# define PLAT_amd64_linux 1
+#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
+# define PLAT_ppc32_linux 1
+#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
+# define PLAT_ppc64_linux 1
+#elif defined(__linux__) && defined(__arm__)
+# define PLAT_arm_linux 1
+#else
+/* If we're not compiling for our target platform, don't generate
+ any inline asms. */
+# if !defined(NVALGRIND)
+# define NVALGRIND 1
+# endif
+#endif
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
+/* in here of use to end-users -- skip to the next section. */
+/* ------------------------------------------------------------------ */
+
+#if defined(NVALGRIND)
+
+/* Define NVALGRIND to completely remove the Valgrind magic sequence
+ from the compiled code (analogous to NDEBUG's effects on
+ assert()) */
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { \
+ (_zzq_rlval) = (_zzq_default); \
+ }
+
+#else /* ! NVALGRIND */
+
+/* The following defines the magic code sequences which the JITter
+ spots and handles magically. Don't look too closely at them as
+ they will rot your brain.
+
+ The assembly code sequences for all architectures is in this one
+ file. This is because this file must be stand-alone, and we don't
+ want to have multiple files.
+
+ For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
+ value gets put in the return slot, so that everything works when
+ this is executed not under Valgrind. Args are passed in a memory
+ block, and so there's no intrinsic limit to the number that could
+ be passed, but it's currently five.
+
+ The macro args are:
+ _zzq_rlval result lvalue
+ _zzq_default default value (result returned when running on real CPU)
+ _zzq_request request code
+ _zzq_arg1..5 request params
+
+ The other two macros are used to support function wrapping, and are
+ a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
+ guest's NRADDR pseudo-register and whatever other information is
+ needed to safely run the call original from the wrapper: on
+ ppc64-linux, the R2 value at the divert point is also needed. This
+ information is abstracted into a user-visible type, OrigFn.
+
+ VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
+ guest, but guarantees that the branch instruction will not be
+ redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
+ branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
+ complete inline asm, since it needs to be combined with more magic
+ inline asm stuff to be useful.
+*/
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
+ || (defined(PLAT_x86_win32) && defined(__GNUC__))
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "roll $3, %%edi ; roll $13, %%edi\n\t" \
+ "roll $29, %%edi ; roll $19, %%edi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EDX = client_request ( %EAX ) */ \
+ "xchgl %%ebx,%%ebx" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EAX = guest_NRADDR */ \
+ "xchgl %%ecx,%%ecx" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_EAX \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%EAX */ \
+ "xchgl %%edx,%%edx\n\t"
+#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
+
+/* ------------------------- x86-Win32 ------------------------- */
+
+#if defined(PLAT_x86_win32) && !defined(__GNUC__)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#if defined(_MSC_VER)
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ __asm rol edi, 3 __asm rol edi, 13 \
+ __asm rol edi, 29 __asm rol edi, 19
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { volatile uintptr_t _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (uintptr_t)(_zzq_request); \
+ _zzq_args[1] = (uintptr_t)(_zzq_arg1); \
+ _zzq_args[2] = (uintptr_t)(_zzq_arg2); \
+ _zzq_args[3] = (uintptr_t)(_zzq_arg3); \
+ _zzq_args[4] = (uintptr_t)(_zzq_arg4); \
+ _zzq_args[5] = (uintptr_t)(_zzq_arg5); \
+ __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EDX = client_request ( %EAX ) */ \
+ __asm xchg ebx,ebx \
+ __asm mov _zzq_result, edx \
+ } \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned int __addr; \
+ __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EAX = guest_NRADDR */ \
+ __asm xchg ecx,ecx \
+ __asm mov __addr, eax \
+ } \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_EAX ERROR
+
+#else
+#error Unsupported compiler.
+#endif
+
+#endif /* PLAT_x86_win32 */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
+
+typedef
+ struct {
+ unsigned long long int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
+ "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { volatile unsigned long long int _zzq_args[6]; \
+ volatile unsigned long long int _zzq_result; \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %RDX = client_request ( %RAX ) */ \
+ "xchgq %%rbx,%%rbx" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned long long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %RAX = guest_NRADDR */ \
+ "xchgq %%rcx,%%rcx" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_RAX \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%RAX */ \
+ "xchgq %%rdx,%%rdx\n\t"
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
+ "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned int _zzq_args[6]; \
+ unsigned int _zzq_result; \
+ unsigned int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 3,%1\n\t" /*default*/ \
+ "mr 4,%2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" /*result*/ \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_default), "b" (_zzq_ptr) \
+ : "cc", "memory", "r3", "r4"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "cc", "memory", "r3" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+typedef
+ struct {
+ unsigned long long int nraddr; /* where's the code? */
+ unsigned long long int r2; /* what tocptr do we need? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
+ "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned long long int _zzq_args[6]; \
+ register unsigned long long int _zzq_result __asm__("r3"); \
+ register unsigned long long int* _zzq_ptr __asm__("r4"); \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1" \
+ : "=r" (_zzq_result) \
+ : "0" (_zzq_default), "r" (_zzq_ptr) \
+ : "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned long long int __addr __asm__("r3"); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
+ "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile("mov r3, %1\n\t" /*default*/ \
+ "mov r4, %2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* R3 = client_request ( R4 ) */ \
+ "orr r10, r10, r10\n\t" \
+ "mov %0, r3" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "cc","memory", "r3", "r4"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* R3 = guest_NRADDR */ \
+ "orr r11, r11, r11\n\t" \
+ "mov %0, r3" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory", "r3" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R4 */ \
+ "orr r12, r12, r12\n\t"
+
+#endif /* PLAT_arm_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ unsigned int r2; /* what tocptr do we need? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
+ "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned int _zzq_args[7]; \
+ register unsigned int _zzq_result; \
+ register unsigned int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ _zzq_args[6] = (unsigned int)(_zzq_default); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 4,%1\n\t" \
+ "lwz 3, 24(4)\n\t" \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_ptr) \
+ : "r3", "r4", "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+typedef
+ struct {
+ unsigned long long int nraddr; /* where's the code? */
+ unsigned long long int r2; /* what tocptr do we need? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
+ "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned long long int _zzq_args[7]; \
+ register unsigned long long int _zzq_result; \
+ register unsigned long long int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned int long long)(_zzq_request); \
+ _zzq_args[1] = (unsigned int long long)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int long long)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int long long)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int long long)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int long long)(_zzq_arg5); \
+ _zzq_args[6] = (unsigned int long long)(_zzq_default); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 4,%1\n\t" \
+ "ld 3, 48(4)\n\t" \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_ptr) \
+ : "r3", "r4", "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned long long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_aix5 */
+
+/* Insert assembly code for other platforms here... */
+
+#endif /* NVALGRIND */
+
+
+/* ------------------------------------------------------------------ */
+/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
+/* ugly. It's the least-worst tradeoff I can think of. */
+/* ------------------------------------------------------------------ */
+
+/* This section defines magic (a.k.a appalling-hack) macros for doing
+ guaranteed-no-redirection macros, so as to get from function
+ wrappers to the functions they are wrapping. The whole point is to
+ construct standard call sequences, but to do the call itself with a
+ special no-redirect call pseudo-instruction that the JIT
+ understands and handles specially. This section is long and
+ repetitious, and I can't see a way to make it shorter.
+
+ The naming scheme is as follows:
+
+ CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
+
+ 'W' stands for "word" and 'v' for "void". Hence there are
+ different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
+ and for each, the possibility of returning a word-typed result, or
+ no result.
+*/
+
+/* Use these to write the name of your wrapper. NOTE: duplicates
+ VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+
+/* Use an extra level of macroisation so as to ensure the soname/fnname
+ args are fully macro-expanded before pasting them together. */
+#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
+
+#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
+ VG_CONCAT4(_vgwZU_,soname,_,fnname)
+
+#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
+ VG_CONCAT4(_vgwZZ_,soname,_,fnname)
+
+/* Use this macro from within a wrapper function to collect the
+ context (address and possibly other info) of the original function.
+ Once you have that you can then use it in one of the CALL_FN_
+ macros. The type of the argument _lval is OrigFn. */
+#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
+
+/* Derivatives of the main macros below, for calling functions
+ returning void. */
+
+#define CALL_FN_v_v(fnptr) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_v(_junk,fnptr); } while (0)
+
+#define CALL_FN_v_W(fnptr, arg1) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
+
+#define CALL_FN_v_WW(fnptr, arg1,arg2) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
+
+#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
+
+#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
+
+#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
+
+#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
+
+#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
+
+/* These regs are trashed by the hidden call. No need to mention eax
+ as gcc can already see that, plus causes gcc to bomb. */
+#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+
+/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
+ long) == 4. */
+
+/* Call a zero-argument function through Valgrind's no-redirect
+   mechanism.  _argvec[0] holds the target address (nraddr --
+   presumably the non-redirected address; confirm against the OrigFn
+   definition earlier in this header).  %eax carries &_argvec[0] into
+   the asm ("a" input), the target is loaded from it, and the result
+   comes back in %eax ("=a" output). */
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+/* One-argument call.  The asm pads %esp down by 12 and then pushes the
+   single 4-byte argument, for a 16-byte total that is popped in one go
+   afterwards ("addl $16") -- this keeps the outgoing-argument area a
+   16-byte multiple (presumably for the 16-byte stack alignment some
+   x86 ABIs require; the same 16/32/48 pattern recurs in the larger
+   macros below). */
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "subl $12, %%esp\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "subl $8, %%esp\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "subl $4, %%esp\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "subl $12, %%esp\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "subl $8, %%esp\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "subl $4, %%esp\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "subl $12, %%esp\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "subl $8, %%esp\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "subl $4, %%esp\n\t" \
+ "pushl 44(%%eax)\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "pushl 48(%%eax)\n\t" \
+ "pushl 44(%%eax)\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_x86_linux || PLAT_x86_darwin */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
+
+/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
+ "rdi", "r8", "r9", "r10", "r11"
+
+/* This is all pretty complex. It's so as to make stack unwinding
+ work reliably. See bug 243270. The basic problem is the sub and
+ add of 128 of %rsp in all of the following macros. If gcc believes
+ the CFA is in %rsp, then unwinding may fail, because what's at the
+ CFA is not what gcc "expected" when it constructs the CFIs for the
+ places where the macros are instantiated.
+
+ But we can't just add a CFI annotation to increase the CFA offset
+ by 128, to match the sub of 128 from %rsp, because we don't know
+ whether gcc has chosen %rsp as the CFA at that point, or whether it
+ has chosen some other register (eg, %rbp). In the latter case,
+ adding a CFI annotation to change the CFA offset is simply wrong.
+
+ So the solution is to get hold of the CFA using
+ __builtin_dwarf_cfa(), put it in a known register, and add a
+ CFI annotation to say what the register is. We choose %rbp for
+ this (perhaps perversely), because:
+
+ (1) %rbp is already subject to unwinding. If a new register was
+ chosen then the unwinder would have to unwind it in all stack
+ traces, which is expensive, and
+
+ (2) %rbp is already subject to precise exception updates in the
+ JIT. If a new register was chosen, we'd have to have precise
+ exceptions for it too, which reduces performance of the
+ generated code.
+
+ However .. one extra complication. We can't just whack the result
+ of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
+ list of trashed registers at the end of the inline assembly
+ fragments; gcc won't allow %rbp to appear in that list. Hence
+ instead we need to stash %rbp in %r15 for the duration of the asm,
+ and say that %r15 is trashed instead. gcc seems happy to go with
+ that.
+
+ Oh .. and this all needs to be conditionalised so that it is
+ unchanged from before this commit, when compiled with older gccs
+ that don't support __builtin_dwarf_cfa. Furthermore, since
+ this header file is freestanding, it has to be independent of
+ config.h, and so the following conditionalisation cannot depend on
+ configure time checks.
+
+ Although it's not clear from
+ 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
+ this expression excludes Darwin.
+ .cfi directives in Darwin assembly appear to be completely
+ different and I haven't investigated how they work.
+
+ For even more entertainment value, note we have to use the
+ completely undocumented __builtin_dwarf_cfa(), which appears to
+ really compute the CFA, whereas __builtin_frame_address(0) claims
+ to but actually doesn't. See
+ https://bugs.kde.org/show_bug.cgi?id=243270#c47
+*/
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+/* Extra asm input operand: the caller's CFA, obtained via the
+   (undocumented) __builtin_dwarf_cfa().  See the long comment above
+   for why __builtin_frame_address(0) is not usable here. */
+# define __FRAME_POINTER \
+ ,"r"(__builtin_dwarf_cfa())
+/* Stash %rbp in %r15 (gcc refuses %rbp in the clobber list), point
+   %rbp at the CFA, and emit .cfi directives so the unwinder tracks
+   the CFA through %rbp for the duration of the asm. */
+# define VALGRIND_CFI_PROLOGUE \
+ "movq %%rbp, %%r15\n\t" \
+ "movq %2, %%rbp\n\t" \
+ ".cfi_remember_state\n\t" \
+ ".cfi_def_cfa rbp, 0\n\t"
+/* Undo the prologue: restore %rbp from %r15 and the remembered CFI
+   state. */
+# define VALGRIND_CFI_EPILOGUE \
+ "movq %%r15, %%rbp\n\t" \
+ ".cfi_restore_state\n\t"
+#else
+/* Older gccs / Darwin: no DWARF2 CFI asm support, so these expand to
+   nothing and unwinding through the hidden call is best-effort. */
+# define __FRAME_POINTER
+# define VALGRIND_CFI_PROLOGUE
+# define VALGRIND_CFI_EPILOGUE
+#endif
+
+
+/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
+ long) == 8. */
+
+/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
+ macros. In order not to trash the stack redzone, we need to drop
+ %rsp by 128 before the hidden call, and restore afterwards. The
+ nastyness is that it is only by luck that the stack still appears
+ to be unwindable during the hidden call - since then the behaviour
+ of any routine using this macro does not match what the CFI data
+ says. Sigh.
+
+ Why is this important? Imagine that a wrapper has a stack
+ allocated local, and passes to the hidden call, a pointer to it.
+ Because gcc does not know about the hidden call, it may allocate
+ that local in the redzone. Unfortunately the hidden call may then
+ trash it before it comes to use it. So we must step clear of the
+ redzone, for the duration of the hidden call, to make it safe.
+
+ Probably the same problem afflicts the other redzone-style ABIs too
+ (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+ self describing (none of this CFI nonsense) so at least messing
+ with the stack pointer doesn't give a danger of non-unwindable
+ stack. */
+
+/* Zero-argument call (amd64).  %rsp is dropped by 128 to step clear of
+   the stack redzone for the duration of the hidden call and restored
+   afterwards (see the "NB 9 Sept 07" comment above for why), while the
+   CFI prologue/epilogue keep the stack unwindable; %r15 is clobbered
+   because the prologue uses it to hold %rbp. */
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $8, %%rsp\n" \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $16, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $24, %%rsp\n" \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $32, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $40, %%rsp\n" \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "pushq 96(%%rax)\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $48, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* This is useful for finding out about the on-stack stuff:
+
+ extern int f9 ( int,int,int,int,int,int,int,int,int );
+ extern int f10 ( int,int,int,int,int,int,int,int,int,int );
+ extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
+ extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
+
+ int g9 ( void ) {
+ return f9(11,22,33,44,55,66,77,88,99);
+ }
+ int g10 ( void ) {
+ return f10(11,22,33,44,55,66,77,88,99,110);
+ }
+ int g11 ( void ) {
+ return f11(11,22,33,44,55,66,77,88,99,110,121);
+ }
+ int g12 ( void ) {
+ return f12(11,22,33,44,55,66,77,88,99,110,121,132);
+ }
+*/
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc32-linux,
+ sizeof(unsigned long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,16\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,16\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-32\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,16(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,32\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ _argvec[12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-32\n\t" \
+ /* arg12 */ \
+ "lwz 3,48(11)\n\t" \
+ "stw 3,20(1)\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,16(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,32\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+ long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,128" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,128" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,144" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
+ /* arg12 */ \
+ "ld 3,96(11)\n\t" \
+ "std 3,136(1)\n\t" \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,144" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14"
+
+/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "push {r0} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #4 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "push {r0, r1} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #8 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "push {r0, r1, r2} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #12 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "push {r0, r1, r2, r3} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #16 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #20 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #40] \n\t" \
+ "push {r0} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #24 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "push {r0, r1} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #28 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory",__CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "ldr r2, [%1, #48] \n\t" \
+ "push {r0, r1, r2} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #32 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_arm_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+ still works. Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
+ "addi 1,1,-" #_n_fr "\n\t" \
+ "lwz 3," #_n_fr "(1)\n\t" \
+ "stw 3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr) \
+ "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(64) \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(64) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(64) \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(64) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(72) \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,64(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(72) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(72) \
+ /* arg12 */ \
+ "lwz 3,48(11)\n\t" \
+ "stw 3,68(1)\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,64(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(72) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+ still works. Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
+ "addi 1,1,-" #_n_fr "\n\t" \
+ "ld 3," #_n_fr "(1)\n\t" \
+ "std 3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr) \
+ "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
+ long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(128) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(128) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(144) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
+ /* arg12 */ \
+ "ld 3,96(11)\n\t" \
+ "std 3,136(1)\n\t" \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(144) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc64_aix5 */
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
+/* */
+/* ------------------------------------------------------------------ */
+
+/* Some request codes. There are many more of these, but most are not
+ exposed to end-user view. These are the public ones, all of the
+ form 0x1000 + small_number.
+
+ Core ones are in the range 0x00000000--0x0000ffff. The non-public
+ ones start at 0x2000.
+*/
+
+/* These macros are used by tools -- they must be public, but don't
+ embed them into other programs. */
+#define VG_USERREQ_TOOL_BASE(a,b) \
+ ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
+#define VG_IS_TOOL_USERREQ(a, b, v) \
+ (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+ This enum comprises an ABI exported by Valgrind to programs
+ which use client requests. DO NOT CHANGE THE ORDER OF THESE
+ ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+ enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
+ VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
+
+ /* These allow any function to be called from the simulated
+ CPU but run on the real CPU. Nb: the first arg passed to
+ the function is always the ThreadId of the running
+ thread! So CLIENT_CALL0 actually requires a 1 arg
+ function, etc. */
+ VG_USERREQ__CLIENT_CALL0 = 0x1101,
+ VG_USERREQ__CLIENT_CALL1 = 0x1102,
+ VG_USERREQ__CLIENT_CALL2 = 0x1103,
+ VG_USERREQ__CLIENT_CALL3 = 0x1104,
+
+ /* Can be useful in regression testing suites -- eg. can
+ send Valgrind's output to /dev/null and still count
+ errors. */
+ VG_USERREQ__COUNT_ERRORS = 0x1201,
+
+ /* These are useful and can be interpreted by any tool that
+ tracks malloc() et al, by using vg_replace_malloc.c. */
+ VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+ VG_USERREQ__FREELIKE_BLOCK = 0x1302,
+ /* Memory pool support. */
+ VG_USERREQ__CREATE_MEMPOOL = 0x1303,
+ VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
+ VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
+ VG_USERREQ__MEMPOOL_FREE = 0x1306,
+ VG_USERREQ__MEMPOOL_TRIM = 0x1307,
+ VG_USERREQ__MOVE_MEMPOOL = 0x1308,
+ VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
+ VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
+
+ /* Allow printfs to valgrind log. */
+ /* The first two pass the va_list argument by value, which
+ assumes it is the same size as or smaller than a UWord,
+ which generally isn't the case. Hence are deprecated.
+ The second two pass the vargs by reference and so are
+ immune to this problem. */
+ /* both :: char* fmt, va_list vargs (DEPRECATED) */
+ VG_USERREQ__PRINTF = 0x1401,
+ VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+ /* both :: char* fmt, va_list* vargs */
+ VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
+
+ /* Stack support. */
+ VG_USERREQ__STACK_REGISTER = 0x1501,
+ VG_USERREQ__STACK_DEREGISTER = 0x1502,
+ VG_USERREQ__STACK_CHANGE = 0x1503,
+
+ /* Wine support */
+ VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
+
+ /* Querying of debug info. */
+ VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701
+ } Vg_ClientRequest;
+
+#if !defined(__GNUC__)
+# define __extension__ /* */
+#endif
+
+
+/*
+ * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
+ * client request and whose value equals the client request result.
+ */
+
+#if defined(NVALGRIND)
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ (_zzq_default)
+
+#else /*defined(NVALGRIND)*/
+
+#if defined(_MSC_VER)
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ (vg_VALGRIND_DO_CLIENT_REQUEST_EXPR((uintptr_t)(_zzq_default), \
+ (_zzq_request), (uintptr_t)(_zzq_arg1), (uintptr_t)(_zzq_arg2), \
+ (uintptr_t)(_zzq_arg3), (uintptr_t)(_zzq_arg4), \
+ (uintptr_t)(_zzq_arg5)))
+
+static __inline unsigned
+vg_VALGRIND_DO_CLIENT_REQUEST_EXPR(uintptr_t _zzq_default,
+ unsigned _zzq_request, uintptr_t _zzq_arg1,
+ uintptr_t _zzq_arg2, uintptr_t _zzq_arg3,
+ uintptr_t _zzq_arg4, uintptr_t _zzq_arg5)
+{
+ unsigned _zzq_rlval;
+ VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, _zzq_request,
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5);
+ return _zzq_rlval;
+}
+
+#else /*defined(_MSC_VER)*/
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ (__extension__({unsigned int _zzq_rlval; \
+ VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ _zzq_rlval; \
+ }))
+
+#endif /*defined(_MSC_VER)*/
+
+#endif /*defined(NVALGRIND)*/
+
+
+/* Returns the number of Valgrinds this code is running under. That
+ is, 0 if running natively, 1 if running under Valgrind, 2 if
+ running under Valgrind which is running under another Valgrind,
+ etc. */
+#define RUNNING_ON_VALGRIND \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \
+ VG_USERREQ__RUNNING_ON_VALGRIND, \
+ 0, 0, 0, 0, 0) \
+
+
+/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
+ _qzz_len - 1]. Useful if you are debugging a JITter or some such,
+ since it provides a way to make sure valgrind will retranslate the
+ invalidated area. Returns no value. */
+#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DISCARD_TRANSLATIONS, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ }
+
+
+/* These requests are for getting Valgrind itself to print something.
+ Possibly with a backtrace. This is a really ugly hack. The return value
+ is the number of characters printed, excluding the "**<pid>** " part at the
+ start and the backtrace (if present). */
+
+#if defined(NVALGRIND)
+
+# define VALGRIND_PRINTF(...)
+# define VALGRIND_PRINTF_BACKTRACE(...)
+
+#else /* NVALGRIND */
+
+#if !defined(_MSC_VER)
+/* Modern GCC will optimize the static routine out if unused,
+ and unused attribute will shut down warnings about it. */
+static int VALGRIND_PRINTF(const char *format, ...)
+ __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
+static int
+#if defined(_MSC_VER)
+__inline
+#endif
+VALGRIND_PRINTF(const char *format, ...)
+{
+ unsigned long _qzz_res;
+ va_list vargs;
+ va_start(vargs, format);
+#if defined(_MSC_VER)
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+ VG_USERREQ__PRINTF_VALIST_BY_REF,
+ (uintptr_t)format,
+ (uintptr_t)&vargs,
+ 0, 0, 0);
+#else
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+ VG_USERREQ__PRINTF_VALIST_BY_REF,
+ (unsigned long)format,
+ (unsigned long)&vargs,
+ 0, 0, 0);
+#endif
+ va_end(vargs);
+ return (int)_qzz_res;
+}
+
+#if !defined(_MSC_VER)
+static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+ __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
+static int
+#if defined(_MSC_VER)
+__inline
+#endif
+VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+{
+ unsigned long _qzz_res;
+ va_list vargs;
+ va_start(vargs, format);
+#if defined(_MSC_VER)
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+ (uintptr_t)format,
+ (uintptr_t)&vargs,
+ 0, 0, 0);
+#else
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+ (unsigned long)format,
+ (unsigned long)&vargs,
+ 0, 0, 0);
+#endif
+ va_end(vargs);
+ return (int)_qzz_res;
+}
+
+#endif /* NVALGRIND */
+
+
+/* These requests allow control to move from the simulated CPU to the
+   real CPU, calling an arbitrary function.
+
+ Note that the current ThreadId is inserted as the first argument.
+ So this call:
+
+ VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
+
+ requires f to have this signature:
+
+ Word f(Word tid, Word arg1, Word arg2)
+
+ where "Word" is a word-sized type.
+
+ Note that these client requests are not entirely reliable. For example,
+ if you call a function with them that subsequently calls printf(),
+ there's a high chance Valgrind will crash. Generally, your prospects of
+ these working are made higher if the called function does not refer to
+ any global variables, and does not refer to any libc or other functions
+ (printf et al). Any kind of entanglement with libc or dynamic linking is
+ likely to have a bad outcome, for tricky reasons which we've grappled
+ with a lot in the past.
+*/
+#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL0, \
+ _qyy_fn, \
+ 0, 0, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL1, \
+ _qyy_fn, \
+ _qyy_arg1, 0, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL2, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL3, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, \
+ _qyy_arg3, 0); \
+ _qyy_res; \
+ })
+
+
+/* Counts the number of errors that have been recorded by a tool. Nb:
+ the tool must record the errors with VG_(maybe_record_error)() or
+ VG_(unique_error)() for them to be counted. */
+#define VALGRIND_COUNT_ERRORS \
+ __extension__ \
+ ({unsigned int _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__COUNT_ERRORS, \
+ 0, 0, 0, 0, 0); \
+ _qyy_res; \
+ })
+
+/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
+ when heap blocks are allocated in order to give accurate results. This
+ happens automatically for the standard allocator functions such as
+ malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
+ delete[], etc.
+
+ But if your program uses a custom allocator, this doesn't automatically
+ happen, and Valgrind will not do as well. For example, if you allocate
+   superblocks with mmap() and then allocate chunks of the superblocks, all
+ Valgrind's observations will be at the mmap() level and it won't know that
+ the chunks should be considered separate entities. In Memcheck's case,
+ that means you probably won't get heap block overrun detection (because
+ there won't be redzones marked as unaddressable) and you definitely won't
+ get any leak detection.
+
+ The following client requests allow a custom allocator to be annotated so
+ that it can be handled accurately by Valgrind.
+
+ VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
+ by a malloc()-like function. For Memcheck (an illustrative case), this
+ does two things:
+
+ - It records that the block has been allocated. This means any addresses
+ within the block mentioned in error messages will be
+ identified as belonging to the block. It also means that if the block
+ isn't freed it will be detected by the leak checker.
+
+ - It marks the block as being addressable and undefined (if 'is_zeroed' is
+ not set), or addressable and defined (if 'is_zeroed' is set). This
+ controls how accesses to the block by the program are handled.
+
+ 'addr' is the start of the usable block (ie. after any
+ redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
+ can apply redzones -- these are blocks of padding at the start and end of
+ each block. Adding redzones is recommended as it makes it much more likely
+ Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
+ zeroed (or filled with another predictable value), as is the case for
+ calloc().
+
+ VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
+ heap block -- that will be used by the client program -- is allocated.
+ It's best to put it at the outermost level of the allocator if possible;
+ for example, if you have a function my_alloc() which calls
+ internal_alloc(), and the client request is put inside internal_alloc(),
+ stack traces relating to the heap block will contain entries for both
+ my_alloc() and internal_alloc(), which is probably not what you want.
+
+ For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+ custom blocks from within a heap block, B, that has been allocated with
+ malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+ -- the custom blocks will take precedence.
+
+ VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
+ Memcheck, it does two things:
+
+ - It records that the block has been deallocated. This assumes that the
+ block was annotated as having been allocated via
+ VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
+
+ - It marks the block as being unaddressable.
+
+ VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
+ heap block is deallocated.
+
+ In many cases, these two client requests will not be enough to get your
+ allocator working well with Memcheck. More specifically, if your allocator
+ writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
+ will be necessary to mark the memory as addressable just before the zeroing
+ occurs, otherwise you'll get a lot of invalid write errors. For example,
+ you'll need to do this if your allocator recycles freed blocks, but it
+ zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
+ Alternatively, if your allocator reuses freed blocks for allocator-internal
+ data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
+
+ Really, what's happening is a blurring of the lines between the client
+ program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
+ memory should be considered unaddressable to the client program, but the
+ allocator knows more than the rest of the client program and so may be able
+ to safely access it. Extra client requests are necessary for Valgrind to
+ understand the distinction between the allocator and the rest of the
+ program.
+
+ Note: there is currently no VALGRIND_REALLOCLIKE_BLOCK client request; it
+ has to be emulated with MALLOCLIKE/FREELIKE and memory copying.
+
+ Ignored if addr == 0.
+*/
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MALLOCLIKE_BLOCK, \
+ addr, sizeB, rzB, is_zeroed, 0); \
+ }
+
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+ Ignored if addr == 0.
+*/
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__FREELIKE_BLOCK, \
+ addr, rzB, 0, 0, 0); \
+ }
+
+/* Create a memory pool. */
+#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CREATE_MEMPOOL, \
+ pool, rzB, is_zeroed, 0, 0); \
+ }
+
+/* Destroy a memory pool. */
+#define VALGRIND_DESTROY_MEMPOOL(pool) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DESTROY_MEMPOOL, \
+ pool, 0, 0, 0, 0); \
+ }
+
+/* Associate a piece of memory with a memory pool. */
+#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_ALLOC, \
+ pool, addr, size, 0, 0); \
+ }
+
+/* Disassociate a piece of memory from a memory pool. */
+#define VALGRIND_MEMPOOL_FREE(pool, addr) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_FREE, \
+ pool, addr, 0, 0, 0); \
+ }
+
+/* Disassociate any pieces outside a particular range. */
+#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_TRIM, \
+ pool, addr, size, 0, 0); \
+ }
+
+/* Resize and/or move a memory pool. */
+#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MOVE_MEMPOOL, \
+ poolA, poolB, 0, 0, 0); \
+ }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_CHANGE, \
+ pool, addrA, addrB, size, 0); \
+ }
+
+/* Return 1 if a mempool exists, else 0. */
+#define VALGRIND_MEMPOOL_EXISTS(pool) \
+ __extension__ \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_EXISTS, \
+ pool, 0, 0, 0, 0); \
+ _qzz_res; \
+ })
+
+/* Mark a piece of memory as being a stack. Returns a stack id. */
+#define VALGRIND_STACK_REGISTER(start, end) \
+ __extension__ \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_REGISTER, \
+ start, end, 0, 0, 0); \
+ _qzz_res; \
+ })
+
+/* Unmark the piece of memory associated with a stack id as being a
+ stack. */
+#define VALGRIND_STACK_DEREGISTER(id) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_DEREGISTER, \
+ id, 0, 0, 0, 0); \
+ }
+
+/* Change the start and end address of the stack id. */
+#define VALGRIND_STACK_CHANGE(id, start, end) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_CHANGE, \
+ id, start, end, 0, 0); \
+ }
+
+/* Load PDB debug info for Wine PE image_map. */
+#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__LOAD_PDB_DEBUGINFO, \
+ fd, ptr, total_size, delta, 0); \
+ }
+
+/* Map a code address to a source file name and line number. buf64
+ must point to a 64-byte buffer in the caller's address space. The
+ result will be dumped in there and is guaranteed to be zero
+ terminated. If no info is found, the first byte is set to zero. */
+#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MAP_IP_TO_SRCLOC, \
+ addr, buf64, 0, 0, 0); \
+ }
+
+
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+#endif /* __VALGRIND_H */
diff --git a/security/sandbox/chromium/base/thread_annotations.h b/security/sandbox/chromium/base/thread_annotations.h
new file mode 100644
index 0000000000..fdd32f8490
--- /dev/null
+++ b/security/sandbox/chromium/base/thread_annotations.h
@@ -0,0 +1,264 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// Note that the annotations we use are described as deprecated in the Clang
+// documentation, linked below. E.g. we use EXCLUSIVE_LOCKS_REQUIRED where the
+// Clang docs use REQUIRES.
+//
+// http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+//
+// We use the deprecated Clang annotations to match Abseil (relevant header
+// linked below) and its ecosystem of libraries. We will follow Abseil with
+// respect to upgrading to more modern annotations.
+//
+// https://github.com/abseil/abseil-cpp/blob/master/absl/base/thread_annotations.h
+//
+// These annotations are implemented using compiler attributes. Using the macros
+// defined here instead of raw attributes allow for portability and future
+// compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+
+#ifndef BASE_THREAD_ANNOTATIONS_H_
+#define BASE_THREAD_ANNOTATIONS_H_
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#endif
+
+// GUARDED_BY()
+//
+// Documents if a shared field or global variable needs to be protected by a
+// mutex. GUARDED_BY() allows the user to specify a particular mutex that
+// should be held when accessing the annotated variable.
+//
+// Example:
+//
+// Mutex mu;
+// int p1 GUARDED_BY(mu);
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+
+// PT_GUARDED_BY()
+//
+// Documents if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer.
+//
+// Example:
+// Mutex mu;
+// int *p1 PT_GUARDED_BY(mu);
+//
+// Note that a pointer variable to a shared memory location could itself be a
+// shared variable.
+//
+// Example:
+//
+// // `q`, guarded by `mu1`, points to a shared memory location that is
+// // guarded by `mu2`:
+// int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+
+// ACQUIRED_AFTER() / ACQUIRED_BEFORE()
+//
+// Documents the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
+// and ACQUIRED_BEFORE.)
+//
+// Example:
+//
+// Mutex m1;
+// Mutex m2 ACQUIRED_AFTER(m1);
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED()
+//
+// Documents a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to, and exit from, the
+// function.
+//
+// Example:
+//
+// Mutex mu1, mu2;
+// int a GUARDED_BY(mu1);
+// int b GUARDED_BY(mu2);
+//
+// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... };
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define SHARED_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// LOCKS_EXCLUDED()
+//
+// Documents the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// non-reentrant).
+#define LOCKS_EXCLUDED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+// LOCK_RETURNED()
+//
+// Documents a function that returns a mutex without acquiring it. For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with LOCK_RETURNED.
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// LOCKABLE
+//
+// Documents if a class/type is a lockable type (such as the `Mutex` class).
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// SCOPED_LOCKABLE
+//
+// Documents if a class does RAII locking (such as the `MutexLock` class).
+// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is
+// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no
+// arguments; the analysis will assume that the destructor unlocks whatever the
+// constructor locked.
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// EXCLUSIVE_LOCK_FUNCTION()
+//
+// Documents functions that acquire a lock in the body of a function, and do
+// not release it.
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+// SHARED_LOCK_FUNCTION()
+//
+// Documents functions that acquire a shared (reader) lock in the body of a
+// function, and do not release it.
+#define SHARED_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+// UNLOCK_FUNCTION()
+//
+// Documents functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#define UNLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+
+// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION()
+//
+// Documents functions that try to acquire a lock, and return success or failure
+// (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be `true` for functions that return `true` on
+// success, or `false` for functions that return `false` on success. The second
+// argument specifies the mutex that is locked on success. If unspecified, this
+// mutex is assumed to be `this`.
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define SHARED_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK()
+//
+// Documents functions that dynamically check to see if a lock is held, and fail
+// if it is not held.
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+
+#define ASSERT_SHARED_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
+
+// NO_THREAD_SAFETY_ANALYSIS
+//
+// Turns off thread safety checking within the body of a particular function.
+// This annotation is used to mark functions that are known to be correct, but
+// the locking behavior is more complicated than the analyzer can handle.
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes. These
+// annotations will be ignored by the analysis.
+#define TS_UNCHECKED(x) ""
+
+// TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to TS_UNCHECKED.
+#define TS_FIXME(x) ""
+
+// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
+// a particular function. However, this attribute is used to mark functions
+// that are incorrect and need to be fixed. It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
+// annotation that needs to be fixed, because it is producing thread safety
+// warning. It disables the GUARDED_BY.
+#define GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation. This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
+#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)
+
+namespace thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+} // namespace thread_safety_analysis
+
+// The above is imported as-is from abseil-cpp. The following Chromium-specific
+// synonyms are added for Chromium concepts (SequenceChecker/ThreadChecker).
+#if DCHECK_IS_ON()
+
+// Equivalent to GUARDED_BY for SequenceChecker/ThreadChecker. Currently,
+// clang's error message "requires holding mutex" is misleading. Usage of this
+// macro is discouraged until the message is updated.
+// TODO(etiennep): Update comment above once clang's error message is updated.
+#define GUARDED_BY_CONTEXT(name) GUARDED_BY(name)
+
+// Equivalent to EXCLUSIVE_LOCKS_REQUIRED for SequenceChecker/ThreadChecker.
+// Currently, clang's error message "requires holding mutex" is misleading.
+// Usage of this macro is discouraged until the message is updated.
+// TODO(etiennep): Update comment above once clang's error message is updated.
+#define VALID_CONTEXT_REQUIRED(name) EXCLUSIVE_LOCKS_REQUIRED(name)
+
+#else // DCHECK_IS_ON()
+
+#define GUARDED_BY_CONTEXT(name)
+#define VALID_CONTEXT_REQUIRED(name)
+
+#endif // DCHECK_IS_ON()
+
+#endif // BASE_THREAD_ANNOTATIONS_H_
diff --git a/security/sandbox/chromium/base/threading/platform_thread.cc b/security/sandbox/chromium/base/threading/platform_thread.cc
new file mode 100644
index 0000000000..ae0a4499e7
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/platform_thread.cc
@@ -0,0 +1,51 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#include <atomic>
+#include <memory>
+
+#include "base/feature_list.h"
+
+namespace base {
+
+namespace {
+
+// Whether thread priorities should be used. When disabled,
+// PlatformThread::SetCurrentThreadPriority() no-ops.
+const Feature kThreadPrioritiesFeature{"ThreadPriorities",
+ FEATURE_ENABLED_BY_DEFAULT};
+
+// Whether thread priorities should be used.
+//
+// PlatformThread::SetCurrentThreadPriority() doesn't query the state of the
+// feature directly because FeatureList initialization is not always
+// synchronized with PlatformThread::SetCurrentThreadPriority().
+std::atomic<bool> g_use_thread_priorities(true);
+
+} // namespace
+
+// static
+void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
+ if (g_use_thread_priorities.load())
+ SetCurrentThreadPriorityImpl(priority);
+}
+
+namespace internal {
+
+void InitializeThreadPrioritiesFeature() {
+ // A DCHECK is triggered on FeatureList initialization if the state of a
+ // feature has been checked before. To avoid triggering this DCHECK in unit
+ // tests that call this before initializing the FeatureList, only check the
+ // state of the feature if the FeatureList is initialized.
+ if (FeatureList::GetInstance() &&
+ !FeatureList::IsEnabled(kThreadPrioritiesFeature)) {
+ g_use_thread_priorities.store(false);
+ }
+}
+
+} // namespace internal
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/threading/platform_thread.h b/security/sandbox/chromium/base/threading/platform_thread.h
new file mode 100644
index 0000000000..aa5b9526c1
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/platform_thread.h
@@ -0,0 +1,259 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: You should *NOT* be using this class directly. PlatformThread is
+// the low-level platform-specific abstraction to the OS's threading interface.
+// You should instead be using a message-loop driven Thread, see thread.h.
+
+#ifndef BASE_THREADING_PLATFORM_THREAD_H_
+#define BASE_THREADING_PLATFORM_THREAD_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#elif defined(OS_FUCHSIA)
+#include <zircon/types.h>
+#elif defined(OS_MACOSX)
+#include <mach/mach_types.h>
+#elif defined(OS_POSIX)
+#include <pthread.h>
+#include <unistd.h>
+#endif
+
+namespace base {
+
+// Used for logging. Always an integer value.
+#if defined(OS_WIN)
+typedef DWORD PlatformThreadId;
+#elif defined(OS_FUCHSIA)
+typedef zx_handle_t PlatformThreadId;
+#elif defined(OS_MACOSX)
+typedef mach_port_t PlatformThreadId;
+#elif defined(OS_POSIX)
+typedef pid_t PlatformThreadId;
+#endif
+
+// Used for thread checking and debugging.
+// Meant to be as fast as possible.
+// These are produced by PlatformThread::CurrentRef(), and used to later
+// check if we are on the same thread or not by using ==. These are safe
+// to copy between threads, but can't be copied to another process as they
+// have no meaning there. Also, the internal identifier can be re-used
+// after a thread dies, so a PlatformThreadRef cannot be reliably used
+// to distinguish a new thread from an old, dead thread.
+class PlatformThreadRef {
+ public:
+#if defined(OS_WIN)
+ typedef DWORD RefType;
+#else // OS_POSIX
+ typedef pthread_t RefType;
+#endif
+ constexpr PlatformThreadRef() : id_(0) {}
+
+ explicit constexpr PlatformThreadRef(RefType id) : id_(id) {}
+
+ bool operator==(PlatformThreadRef other) const {
+ return id_ == other.id_;
+ }
+
+ bool operator!=(PlatformThreadRef other) const { return id_ != other.id_; }
+
+ bool is_null() const {
+ return id_ == 0;
+ }
+ private:
+ RefType id_;
+};
+
+// Used to operate on threads.
+class PlatformThreadHandle {
+ public:
+#if defined(OS_WIN)
+ typedef void* Handle;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ typedef pthread_t Handle;
+#endif
+
+ constexpr PlatformThreadHandle() : handle_(0) {}
+
+ explicit constexpr PlatformThreadHandle(Handle handle) : handle_(handle) {}
+
+ bool is_equal(const PlatformThreadHandle& other) const {
+ return handle_ == other.handle_;
+ }
+
+ bool is_null() const {
+ return !handle_;
+ }
+
+ Handle platform_handle() const {
+ return handle_;
+ }
+
+ private:
+ Handle handle_;
+};
+
+const PlatformThreadId kInvalidThreadId(0);
+
+// Valid values for priority of Thread::Options and SimpleThread::Options, and
+// SetCurrentThreadPriority(), listed in increasing order of importance.
+enum class ThreadPriority : int {
+ // Suitable for threads that shouldn't disrupt high priority work.
+ BACKGROUND,
+ // Default priority level.
+ NORMAL,
+ // Suitable for threads which generate data for the display (at ~60Hz).
+ DISPLAY,
+ // Suitable for low-latency, glitch-resistant audio.
+ REALTIME_AUDIO,
+};
+
+// A namespace for low-level thread functions.
+class BASE_EXPORT PlatformThread {
+ public:
+ // Implement this interface to run code on a background thread. Your
+ // ThreadMain method will be called on the newly created thread.
+ class BASE_EXPORT Delegate {
+ public:
+ virtual void ThreadMain() = 0;
+
+ protected:
+ virtual ~Delegate() = default;
+ };
+
+ // Gets the current thread id, which may be useful for logging purposes.
+ static PlatformThreadId CurrentId();
+
+ // Gets the current thread reference, which can be used to check if
+ // we're on the right thread quickly.
+ static PlatformThreadRef CurrentRef();
+
+ // Get the handle representing the current thread. On Windows, this is a
+ // pseudo handle constant which will always represent the thread using it and
+ // hence should not be shared with other threads nor be used to differentiate
+ // the current thread from another.
+ static PlatformThreadHandle CurrentHandle();
+
+ // Yield the current thread so another thread can be scheduled.
+ static void YieldCurrentThread();
+
+ // Sleeps for the specified duration (real-time; ignores time overrides).
+ // Note: The sleep duration may be in base::Time or base::TimeTicks, depending
+ // on platform. If you're looking to use this in unit tests testing delayed
+ // tasks, this will be unreliable - instead, use
+ // base::test::TaskEnvironment with MOCK_TIME mode.
+ static void Sleep(base::TimeDelta duration);
+
+ // Sets the thread name visible to debuggers/tools. This will try to
+ // initialize the context for current thread unless it's a WorkerThread.
+ static void SetName(const std::string& name);
+
+ // Gets the thread name, if previously set by SetName.
+ static const char* GetName();
+
+ // Creates a new thread. The |stack_size| parameter can be 0 to indicate
+ // that the default stack size should be used. Upon success,
+ // |*thread_handle| will be assigned a handle to the newly created thread,
+ // and |delegate|'s ThreadMain method will be executed on the newly created
+ // thread.
+ // NOTE: When you are done with the thread handle, you must call Join to
+ // release system resources associated with the thread. You must ensure that
+ // the Delegate object outlives the thread.
+ static bool Create(size_t stack_size,
+ Delegate* delegate,
+ PlatformThreadHandle* thread_handle) {
+ return CreateWithPriority(stack_size, delegate, thread_handle,
+ ThreadPriority::NORMAL);
+ }
+
+ // CreateWithPriority() does the same thing as Create() except the priority of
+ // the thread is set based on |priority|.
+ static bool CreateWithPriority(size_t stack_size, Delegate* delegate,
+ PlatformThreadHandle* thread_handle,
+ ThreadPriority priority);
+
+ // CreateNonJoinable() does the same thing as Create() except the thread
+ // cannot be Join()'d. Therefore, it also does not output a
+ // PlatformThreadHandle.
+ static bool CreateNonJoinable(size_t stack_size, Delegate* delegate);
+
+ // CreateNonJoinableWithPriority() does the same thing as CreateNonJoinable()
+ // except the priority of the thread is set based on |priority|.
+ static bool CreateNonJoinableWithPriority(size_t stack_size,
+ Delegate* delegate,
+ ThreadPriority priority);
+
+ // Joins with a thread created via the Create function. This function blocks
+ // the caller until the designated thread exits. This will invalidate
+ // |thread_handle|.
+ static void Join(PlatformThreadHandle thread_handle);
+
+ // Detaches and releases the thread handle. The thread is no longer joinable
+ // and |thread_handle| is invalidated after this call.
+ static void Detach(PlatformThreadHandle thread_handle);
+
+ // Returns true if SetCurrentThreadPriority() should be able to increase the
+ // priority of a thread to |priority|.
+ static bool CanIncreaseThreadPriority(ThreadPriority priority);
+
+ // Toggles the current thread's priority at runtime.
+ //
+ // A thread may not be able to raise its priority back up after lowering it if
+ // the process does not have a proper permission, e.g. CAP_SYS_NICE on Linux.
+ // A thread may not be able to lower its priority back down after raising it
+ // to REALTIME_AUDIO.
+ //
+ // This function must not be called from the main thread on Mac. This is to
+ // avoid performance regressions (https://crbug.com/601270).
+ //
+ // Since changing other threads' priority is not permitted in favor of
+ // security, this interface is restricted to change only the current thread
+ // priority (https://crbug.com/399473).
+ static void SetCurrentThreadPriority(ThreadPriority priority);
+
+ static ThreadPriority GetCurrentThreadPriority();
+
+#if defined(OS_LINUX)
+ // Toggles a specific thread's priority at runtime. This can be used to
+ // change the priority of a thread in a different process and will fail
+ // if the calling process does not have proper permissions. The
+ // SetCurrentThreadPriority() function above is preferred in favor of
+ // security but on platforms where sandboxed processes are not allowed to
+ // change priority this function exists to allow a non-sandboxed process
+ // to change the priority of sandboxed threads for improved performance.
+ // Warning: Don't use this for a main thread because that will change the
+ // whole thread group's (i.e. process) priority.
+ static void SetThreadPriority(PlatformThreadId thread_id,
+ ThreadPriority priority);
+#endif
+
+ // Returns the default thread stack size set by chrome. If we do not
+ // explicitly set default size then returns 0.
+ static size_t GetDefaultThreadStackSize();
+
+ private:
+ static void SetCurrentThreadPriorityImpl(ThreadPriority priority);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PlatformThread);
+};
+
+namespace internal {
+
+// Initializes the "ThreadPriorities" feature. The feature state is only taken
+// into account after this initialization. This initialization must be
+// synchronized with calls to PlatformThread::SetCurrentThreadPriority().
+void InitializeThreadPrioritiesFeature();
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_THREADING_PLATFORM_THREAD_H_
diff --git a/security/sandbox/chromium/base/threading/platform_thread_internal_posix.cc b/security/sandbox/chromium/base/threading/platform_thread_internal_posix.cc
new file mode 100644
index 0000000000..378a24d0d1
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/platform_thread_internal_posix.cc
@@ -0,0 +1,39 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread_internal_posix.h"
+
+#include "base/containers/adapters.h"
+#include "base/logging.h"
+
+namespace base {
+
+namespace internal {
+
+int ThreadPriorityToNiceValue(ThreadPriority priority) {
+ for (const auto& pair : kThreadPriorityToNiceValueMap) {
+ if (pair.priority == priority)
+ return pair.nice_value;
+ }
+ NOTREACHED() << "Unknown ThreadPriority";
+ return 0;
+}
+
+ThreadPriority NiceValueToThreadPriority(int nice_value) {
+ // Try to find a priority that best describes |nice_value|. If there isn't
+ // an exact match, this method returns the closest priority whose nice value
+ // is higher (lower priority) than |nice_value|.
+ for (const auto& pair : Reversed(kThreadPriorityToNiceValueMap)) {
+ if (pair.nice_value >= nice_value)
+ return pair.priority;
+ }
+
+ // Reaching here means |nice_value| is more than any of the defined
+ // priorities. The lowest priority is suitable in this case.
+ return ThreadPriority::BACKGROUND;
+}
+
+} // namespace internal
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/threading/platform_thread_internal_posix.h b/security/sandbox/chromium/base/threading/platform_thread_internal_posix.h
new file mode 100644
index 0000000000..d248fa9bfd
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/platform_thread_internal_posix.h
@@ -0,0 +1,62 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
+#define BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
+
+#include "base/base_export.h"
+#include "base/optional.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace internal {
+
+struct ThreadPriorityToNiceValuePair {
+ ThreadPriority priority;
+ int nice_value;
+};
+// The elements must be listed in the order of increasing priority (lowest
+// priority first), that is, in the order of decreasing nice values (highest
+// nice value first).
+BASE_EXPORT extern
+const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4];
+
+// Returns the nice value matching |priority| based on the platform-specific
+// implementation of kThreadPriorityToNiceValueMap.
+int ThreadPriorityToNiceValue(ThreadPriority priority);
+
+// Returns the ThreadPrioirty matching |nice_value| based on the platform-
+// specific implementation of kThreadPriorityToNiceValueMap.
+BASE_EXPORT ThreadPriority NiceValueToThreadPriority(int nice_value);
+
+// If non-nullopt, this return value will be used as the platform-specific
+// result of CanIncreaseThreadPriority().
+Optional<bool> CanIncreaseCurrentThreadPriorityForPlatform(
+ ThreadPriority priority);
+
+// Allows platform specific tweaks to the generic POSIX solution for
+// SetCurrentThreadPriority(). Returns true if the platform-specific
+// implementation handled this |priority| change, false if the generic
+// implementation should instead proceed.
+bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority);
+
+// If non-null, this return value will be used as the platform-specific result
+// of CanIncreaseThreadPriority().
+Optional<ThreadPriority> GetCurrentThreadPriorityForPlatform();
+
+#if defined(OS_LINUX)
+// Current thread id is cached in thread local storage for performance reasons.
+// In some rare cases it's important to clear that cache explicitly (e.g. after
+// going through clone() syscall which does not call pthread_atfork()
+// handlers).
+BASE_EXPORT void ClearTidCache();
+#endif // defined(OS_LINUX)
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
diff --git a/security/sandbox/chromium/base/threading/platform_thread_posix.cc b/security/sandbox/chromium/base/threading/platform_thread_posix.cc
new file mode 100644
index 0000000000..335848e329
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/platform_thread_posix.cc
@@ -0,0 +1,361 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#include <errno.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <memory>
+
+#include "base/debug/activity_tracker.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/no_destructor.h"
+#include "base/threading/platform_thread_internal_posix.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "build/build_config.h"
+
+#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA) && !defined(OS_NACL)
+#include "base/posix/can_lower_nice_to.h"
+#endif
+
+#if defined(OS_LINUX)
+#include <sys/syscall.h>
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <zircon/process.h>
+#else
+#include <sys/resource.h>
+#endif
+
+namespace base {
+
+void InitThreading();
+void TerminateOnThread();
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes);
+
+namespace {
+
+struct ThreadParams {
+ ThreadParams()
+ : delegate(nullptr), joinable(false), priority(ThreadPriority::NORMAL) {}
+
+ PlatformThread::Delegate* delegate;
+ bool joinable;
+ ThreadPriority priority;
+};
+
+void* ThreadFunc(void* params) {
+ PlatformThread::Delegate* delegate = nullptr;
+
+ {
+ std::unique_ptr<ThreadParams> thread_params(
+ static_cast<ThreadParams*>(params));
+
+ delegate = thread_params->delegate;
+ if (!thread_params->joinable)
+ base::ThreadRestrictions::SetSingletonAllowed(false);
+
+#if !defined(OS_NACL)
+ // Threads on linux/android may inherit their priority from the thread
+ // where they were created. This explicitly sets the priority of all new
+ // threads.
+ PlatformThread::SetCurrentThreadPriority(thread_params->priority);
+#endif
+ }
+
+ ThreadIdNameManager::GetInstance()->RegisterThread(
+ PlatformThread::CurrentHandle().platform_handle(),
+ PlatformThread::CurrentId());
+
+ delegate->ThreadMain();
+
+ ThreadIdNameManager::GetInstance()->RemoveName(
+ PlatformThread::CurrentHandle().platform_handle(),
+ PlatformThread::CurrentId());
+
+ base::TerminateOnThread();
+ return nullptr;
+}
+
+bool CreateThread(size_t stack_size,
+ bool joinable,
+ PlatformThread::Delegate* delegate,
+ PlatformThreadHandle* thread_handle,
+ ThreadPriority priority) {
+ DCHECK(thread_handle);
+ base::InitThreading();
+
+ pthread_attr_t attributes;
+ pthread_attr_init(&attributes);
+
+ // Pthreads are joinable by default, so only specify the detached
+ // attribute if the thread should be non-joinable.
+ if (!joinable)
+ pthread_attr_setdetachstate(&attributes, PTHREAD_CREATE_DETACHED);
+
+ // Get a better default if available.
+ if (stack_size == 0)
+ stack_size = base::GetDefaultThreadStackSize(attributes);
+
+ if (stack_size > 0)
+ pthread_attr_setstacksize(&attributes, stack_size);
+
+ std::unique_ptr<ThreadParams> params(new ThreadParams);
+ params->delegate = delegate;
+ params->joinable = joinable;
+ params->priority = priority;
+
+ pthread_t handle;
+ int err = pthread_create(&handle, &attributes, ThreadFunc, params.get());
+ bool success = !err;
+ if (success) {
+ // ThreadParams should be deleted on the created thread after used.
+ ignore_result(params.release());
+ } else {
+ // Value of |handle| is undefined if pthread_create fails.
+ handle = 0;
+ errno = err;
+ PLOG(ERROR) << "pthread_create";
+ }
+ *thread_handle = PlatformThreadHandle(handle);
+
+ pthread_attr_destroy(&attributes);
+
+ return success;
+}
+
+#if defined(OS_LINUX)
+
+// Store the thread ids in local storage since calling the SWI can
+// expensive and PlatformThread::CurrentId is used liberally. Clear
+// the stored value after a fork() because forking changes the thread
+// id. Forking without going through fork() (e.g. clone()) is not
+// supported, but there is no known usage. Using thread_local is
+// fine here (despite being banned) since it is going to be allowed
+// but is blocked on a clang bug for Mac (https://crbug.com/829078)
+// and we can't use ThreadLocalStorage because of re-entrancy due to
+// CHECK/DCHECKs.
+thread_local pid_t g_thread_id = -1;
+
+class InitAtFork {
+ public:
+ InitAtFork() { pthread_atfork(nullptr, nullptr, internal::ClearTidCache); }
+};
+
+#endif // defined(OS_LINUX)
+
+} // namespace
+
+#if defined(OS_LINUX)
+
+namespace internal {
+
+void ClearTidCache() {
+ g_thread_id = -1;
+}
+
+} // namespace internal
+
+#endif // defined(OS_LINUX)
+
+// static
+PlatformThreadId PlatformThread::CurrentId() {
+ // Pthreads doesn't have the concept of a thread ID, so we have to reach down
+ // into the kernel.
+#if defined(OS_MACOSX)
+ return pthread_mach_thread_np(pthread_self());
+#elif defined(OS_LINUX)
+ static NoDestructor<InitAtFork> init_at_fork;
+ if (g_thread_id == -1) {
+ g_thread_id = syscall(__NR_gettid);
+ } else {
+ DCHECK_EQ(g_thread_id, syscall(__NR_gettid))
+ << "Thread id stored in TLS is different from thread id returned by "
+ "the system. It is likely that the process was forked without going "
+ "through fork().";
+ }
+ return g_thread_id;
+#elif defined(OS_ANDROID)
+ return gettid();
+#elif defined(OS_FUCHSIA)
+ return zx_thread_self();
+#elif defined(OS_SOLARIS) || defined(OS_QNX)
+ return pthread_self();
+#elif defined(OS_NACL) && defined(__GLIBC__)
+ return pthread_self();
+#elif defined(OS_NACL) && !defined(__GLIBC__)
+ // Pointers are 32-bits in NaCl.
+ return reinterpret_cast<int32_t>(pthread_self());
+#elif defined(OS_POSIX) && defined(OS_AIX)
+ return pthread_self();
+#elif defined(OS_POSIX) && !defined(OS_AIX)
+ return reinterpret_cast<int64_t>(pthread_self());
+#endif
+}
+
+// static
+PlatformThreadRef PlatformThread::CurrentRef() {
+ return PlatformThreadRef(pthread_self());
+}
+
+// static
+PlatformThreadHandle PlatformThread::CurrentHandle() {
+ return PlatformThreadHandle(pthread_self());
+}
+
+// static
+void PlatformThread::YieldCurrentThread() {
+ sched_yield();
+}
+
+// static
+void PlatformThread::Sleep(TimeDelta duration) {
+ struct timespec sleep_time, remaining;
+
+ // Break the duration into seconds and nanoseconds.
+ // NOTE: TimeDelta's microseconds are int64s while timespec's
+ // nanoseconds are longs, so this unpacking must prevent overflow.
+ sleep_time.tv_sec = duration.InSeconds();
+ duration -= TimeDelta::FromSeconds(sleep_time.tv_sec);
+ sleep_time.tv_nsec = duration.InMicroseconds() * 1000; // nanoseconds
+
+ while (nanosleep(&sleep_time, &remaining) == -1 && errno == EINTR)
+ sleep_time = remaining;
+}
+
+// static
+const char* PlatformThread::GetName() {
+ return ThreadIdNameManager::GetInstance()->GetName(CurrentId());
+}
+
+// static
+bool PlatformThread::CreateWithPriority(size_t stack_size, Delegate* delegate,
+ PlatformThreadHandle* thread_handle,
+ ThreadPriority priority) {
+ return CreateThread(stack_size, true /* joinable thread */, delegate,
+ thread_handle, priority);
+}
+
+// static
+bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
+ return CreateNonJoinableWithPriority(stack_size, delegate,
+ ThreadPriority::NORMAL);
+}
+
+// static
+bool PlatformThread::CreateNonJoinableWithPriority(size_t stack_size,
+ Delegate* delegate,
+ ThreadPriority priority) {
+ PlatformThreadHandle unused;
+
+ bool result = CreateThread(stack_size, false /* non-joinable thread */,
+ delegate, &unused, priority);
+ return result;
+}
+
+// static
+void PlatformThread::Join(PlatformThreadHandle thread_handle) {
+ // Record the event that this thread is blocking upon (for hang diagnosis).
+ base::debug::ScopedThreadJoinActivity thread_activity(&thread_handle);
+
+ // Joining another thread may block the current thread for a long time, since
+ // the thread referred to by |thread_handle| may still be running long-lived /
+ // blocking tasks.
+ base::internal::ScopedBlockingCallWithBaseSyncPrimitives scoped_blocking_call(
+ FROM_HERE, base::BlockingType::MAY_BLOCK);
+ CHECK_EQ(0, pthread_join(thread_handle.platform_handle(), nullptr));
+}
+
+// static
+void PlatformThread::Detach(PlatformThreadHandle thread_handle) {
+ CHECK_EQ(0, pthread_detach(thread_handle.platform_handle()));
+}
+
+// Mac and Fuchsia have their own Set/GetCurrentThreadPriority()
+// implementations.
+#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+
+// static
+bool PlatformThread::CanIncreaseThreadPriority(ThreadPriority priority) {
+#if defined(OS_NACL)
+ return false;
+#else
+ auto platform_specific_ability =
+ internal::CanIncreaseCurrentThreadPriorityForPlatform(priority);
+ if (platform_specific_ability)
+ return platform_specific_ability.value();
+
+ return internal::CanLowerNiceTo(
+ internal::ThreadPriorityToNiceValue(priority));
+#endif // defined(OS_NACL)
+}
+
+// static
+void PlatformThread::SetCurrentThreadPriorityImpl(ThreadPriority priority) {
+#if defined(OS_NACL)
+ NOTIMPLEMENTED();
+#else
+ if (internal::SetCurrentThreadPriorityForPlatform(priority))
+ return;
+
+ // setpriority(2) should change the whole thread group's (i.e. process)
+ // priority. However, as stated in the bugs section of
+ // http://man7.org/linux/man-pages/man2/getpriority.2.html: "under the current
+ // Linux/NPTL implementation of POSIX threads, the nice value is a per-thread
+// attribute". Also, 0 is preferred to the current thread id since it is
+ // equivalent but makes sandboxing easier (https://crbug.com/399473).
+ const int nice_setting = internal::ThreadPriorityToNiceValue(priority);
+ if (setpriority(PRIO_PROCESS, 0, nice_setting)) {
+ DVPLOG(1) << "Failed to set nice value of thread ("
+ << PlatformThread::CurrentId() << ") to " << nice_setting;
+ }
+#endif // defined(OS_NACL)
+}
+
+// static
+ThreadPriority PlatformThread::GetCurrentThreadPriority() {
+#if defined(OS_NACL)
+ NOTIMPLEMENTED();
+ return ThreadPriority::NORMAL;
+#else
+ // Mirrors SetCurrentThreadPriority()'s implementation.
+ auto platform_specific_priority =
+ internal::GetCurrentThreadPriorityForPlatform();
+ if (platform_specific_priority)
+ return platform_specific_priority.value();
+
+ // Need to clear errno before calling getpriority():
+ // http://man7.org/linux/man-pages/man2/getpriority.2.html
+ errno = 0;
+ int nice_value = getpriority(PRIO_PROCESS, 0);
+ if (errno != 0) {
+ DVPLOG(1) << "Failed to get nice value of thread ("
+ << PlatformThread::CurrentId() << ")";
+ return ThreadPriority::NORMAL;
+ }
+
+ return internal::NiceValueToThreadPriority(nice_value);
+#endif  // defined(OS_NACL)
+}
+
+#endif // !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+
+// static
+size_t PlatformThread::GetDefaultThreadStackSize() {
+ pthread_attr_t attributes;
+ pthread_attr_init(&attributes);
+ return base::GetDefaultThreadStackSize(attributes);
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/threading/platform_thread_win.cc b/security/sandbox/chromium/base/threading/platform_thread_win.cc
new file mode 100644
index 0000000000..0bcf6db247
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/platform_thread_win.cc
@@ -0,0 +1,463 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread_win.h"
+
+#include <stddef.h>
+
+#include "base/debug/activity_tracker.h"
+#include "base/debug/alias.h"
+#include "base/debug/crash_logging.h"
+#include "base/debug/profiler.h"
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/process/memory.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time_override.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_version.h"
+#include "build/build_config.h"
+
+#include <windows.h>
+
+namespace base {
+
+namespace {
+
+// The most common value returned by ::GetThreadPriority() after background
+// thread mode is enabled on Windows 7.
+constexpr int kWin7BackgroundThreadModePriority = 4;
+
+// Value sometimes returned by ::GetThreadPriority() after thread priority is
+// set to normal on Windows 7.
+constexpr int kWin7NormalPriority = 3;
+
+// These values are sometimes returned by ::GetThreadPriority().
+constexpr int kWinNormalPriority1 = 5;
+constexpr int kWinNormalPriority2 = 6;
+
+// The information on how to set the thread name comes from
+// a MSDN article: http://msdn2.microsoft.com/en-us/library/xcb2z8hs.aspx
+const DWORD kVCThreadNameException = 0x406D1388;
+
+typedef struct tagTHREADNAME_INFO {
+ DWORD dwType; // Must be 0x1000.
+ LPCSTR szName; // Pointer to name (in user addr space).
+ DWORD dwThreadID; // Thread ID (-1=caller thread).
+ DWORD dwFlags; // Reserved for future use, must be zero.
+} THREADNAME_INFO;
+
+// The SetThreadDescription API was brought in version 1607 of Windows 10.
+typedef HRESULT(WINAPI* SetThreadDescription)(HANDLE hThread,
+ PCWSTR lpThreadDescription);
+
+// This function has try handling, so it is separated out of its caller.
+void SetNameInternal(PlatformThreadId thread_id, const char* name) {
+  // This function is only used for debugging purposes, as seen at its caller.
+#ifndef __MINGW32__
+ THREADNAME_INFO info;
+ info.dwType = 0x1000;
+ info.szName = name;
+ info.dwThreadID = thread_id;
+ info.dwFlags = 0;
+
+ __try {
+ RaiseException(kVCThreadNameException, 0, sizeof(info)/sizeof(DWORD),
+ reinterpret_cast<DWORD_PTR*>(&info));
+ } __except(EXCEPTION_CONTINUE_EXECUTION) {
+ }
+#endif
+}
+
+struct ThreadParams {
+ PlatformThread::Delegate* delegate;
+ bool joinable;
+ ThreadPriority priority;
+};
+
+DWORD __stdcall ThreadFunc(void* params) {
+ ThreadParams* thread_params = static_cast<ThreadParams*>(params);
+ PlatformThread::Delegate* delegate = thread_params->delegate;
+ if (!thread_params->joinable)
+ base::ThreadRestrictions::SetSingletonAllowed(false);
+
+ if (thread_params->priority != ThreadPriority::NORMAL)
+ PlatformThread::SetCurrentThreadPriority(thread_params->priority);
+
+ // Retrieve a copy of the thread handle to use as the key in the
+ // thread name mapping.
+ PlatformThreadHandle::Handle platform_handle;
+ BOOL did_dup = DuplicateHandle(GetCurrentProcess(),
+ GetCurrentThread(),
+ GetCurrentProcess(),
+ &platform_handle,
+ 0,
+ FALSE,
+ DUPLICATE_SAME_ACCESS);
+
+ win::ScopedHandle scoped_platform_handle;
+
+ if (did_dup) {
+ scoped_platform_handle.Set(platform_handle);
+ ThreadIdNameManager::GetInstance()->RegisterThread(
+ scoped_platform_handle.Get(),
+ PlatformThread::CurrentId());
+ }
+
+ delete thread_params;
+ delegate->ThreadMain();
+
+ if (did_dup) {
+ ThreadIdNameManager::GetInstance()->RemoveName(
+ scoped_platform_handle.Get(),
+ PlatformThread::CurrentId());
+ }
+
+ return 0;
+}
+
+// CreateThreadInternal() matches PlatformThread::CreateWithPriority(), except
+// that |out_thread_handle| may be nullptr, in which case a non-joinable thread
+// is created.
+bool CreateThreadInternal(size_t stack_size,
+ PlatformThread::Delegate* delegate,
+ PlatformThreadHandle* out_thread_handle,
+ ThreadPriority priority) {
+ unsigned int flags = 0;
+ if (stack_size > 0) {
+ flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
+#if defined(ARCH_CPU_32_BITS)
+ } else {
+ // The process stack size is increased to give spaces to |RendererMain| in
+ // |chrome/BUILD.gn|, but keep the default stack size of other threads to
+ // 1MB for the address space pressure.
+ flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
+ stack_size = 1024 * 1024;
+#endif
+ }
+
+ ThreadParams* params = new ThreadParams;
+ params->delegate = delegate;
+ params->joinable = out_thread_handle != nullptr;
+ params->priority = priority;
+
+ // Using CreateThread here vs _beginthreadex makes thread creation a bit
+ // faster and doesn't require the loader lock to be available. Our code will
+ // have to work running on CreateThread() threads anyway, since we run code on
+ // the Windows thread pool, etc. For some background on the difference:
+ // http://www.microsoft.com/msj/1099/win32/win321099.aspx
+ void* thread_handle =
+ ::CreateThread(nullptr, stack_size, ThreadFunc, params, flags, nullptr);
+
+ if (!thread_handle) {
+ DWORD last_error = ::GetLastError();
+
+ switch (last_error) {
+ case ERROR_NOT_ENOUGH_MEMORY:
+ case ERROR_OUTOFMEMORY:
+ case ERROR_COMMITMENT_LIMIT:
+ TerminateBecauseOutOfMemory(stack_size);
+ break;
+
+ default:
+ static auto* last_error_crash_key = debug::AllocateCrashKeyString(
+ "create_thread_last_error", debug::CrashKeySize::Size32);
+ debug::SetCrashKeyString(last_error_crash_key,
+ base::NumberToString(last_error));
+ break;
+ }
+
+ delete params;
+ return false;
+ }
+
+ if (out_thread_handle)
+ *out_thread_handle = PlatformThreadHandle(thread_handle);
+ else
+ CloseHandle(thread_handle);
+ return true;
+}
+
+} // namespace
+
+namespace internal {
+
+void AssertMemoryPriority(HANDLE thread, int memory_priority) {
+#if DCHECK_IS_ON()
+ static const auto get_thread_information_fn =
+ reinterpret_cast<decltype(&::GetThreadInformation)>(::GetProcAddress(
+ ::GetModuleHandle(L"Kernel32.dll"), "GetThreadInformation"));
+
+ if (!get_thread_information_fn) {
+ DCHECK_EQ(win::GetVersion(), win::Version::WIN7);
+ return;
+ }
+
+ MEMORY_PRIORITY_INFORMATION memory_priority_information = {};
+ DCHECK(get_thread_information_fn(thread, ::ThreadMemoryPriority,
+ &memory_priority_information,
+ sizeof(memory_priority_information)));
+
+ DCHECK_EQ(memory_priority,
+ static_cast<int>(memory_priority_information.MemoryPriority));
+#endif
+}
+
+} // namespace internal
+
+// static
+PlatformThreadId PlatformThread::CurrentId() {
+ return ::GetCurrentThreadId();
+}
+
+// static
+PlatformThreadRef PlatformThread::CurrentRef() {
+ return PlatformThreadRef(::GetCurrentThreadId());
+}
+
+// static
+PlatformThreadHandle PlatformThread::CurrentHandle() {
+ return PlatformThreadHandle(::GetCurrentThread());
+}
+
+// static
+void PlatformThread::YieldCurrentThread() {
+ ::Sleep(0);
+}
+
+// static
+void PlatformThread::Sleep(TimeDelta duration) {
+ // When measured with a high resolution clock, Sleep() sometimes returns much
+ // too early. We may need to call it repeatedly to get the desired duration.
+ // PlatformThread::Sleep doesn't support mock-time, so this always uses
+ // real-time.
+ const TimeTicks end = subtle::TimeTicksNowIgnoringOverride() + duration;
+ for (TimeTicks now = subtle::TimeTicksNowIgnoringOverride(); now < end;
+ now = subtle::TimeTicksNowIgnoringOverride()) {
+ ::Sleep(static_cast<DWORD>((end - now).InMillisecondsRoundedUp()));
+ }
+}
+
+// static
+void PlatformThread::SetName(const std::string& name) {
+ ThreadIdNameManager::GetInstance()->SetName(name);
+
+ // The SetThreadDescription API works even if no debugger is attached.
+ static auto set_thread_description_func =
+ reinterpret_cast<SetThreadDescription>(::GetProcAddress(
+ ::GetModuleHandle(L"Kernel32.dll"), "SetThreadDescription"));
+ if (set_thread_description_func) {
+ set_thread_description_func(::GetCurrentThread(),
+ base::UTF8ToWide(name).c_str());
+ }
+
+ // The debugger needs to be around to catch the name in the exception. If
+ // there isn't a debugger, we are just needlessly throwing an exception.
+ if (!::IsDebuggerPresent())
+ return;
+
+ SetNameInternal(CurrentId(), name.c_str());
+}
+
+// static
+const char* PlatformThread::GetName() {
+ return ThreadIdNameManager::GetInstance()->GetName(CurrentId());
+}
+
+// static
+bool PlatformThread::CreateWithPriority(size_t stack_size, Delegate* delegate,
+ PlatformThreadHandle* thread_handle,
+ ThreadPriority priority) {
+ DCHECK(thread_handle);
+ return CreateThreadInternal(stack_size, delegate, thread_handle, priority);
+}
+
+// static
+bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
+ return CreateNonJoinableWithPriority(stack_size, delegate,
+ ThreadPriority::NORMAL);
+}
+
+// static
+bool PlatformThread::CreateNonJoinableWithPriority(size_t stack_size,
+ Delegate* delegate,
+ ThreadPriority priority) {
+ return CreateThreadInternal(stack_size, delegate, nullptr /* non-joinable */,
+ priority);
+}
+
+// static
+void PlatformThread::Join(PlatformThreadHandle thread_handle) {
+ DCHECK(thread_handle.platform_handle());
+
+ DWORD thread_id = 0;
+ thread_id = ::GetThreadId(thread_handle.platform_handle());
+ DWORD last_error = 0;
+ if (!thread_id)
+ last_error = ::GetLastError();
+
+ // Record information about the exiting thread in case joining hangs.
+ base::debug::Alias(&thread_id);
+ base::debug::Alias(&last_error);
+
+ // Record the event that this thread is blocking upon (for hang diagnosis).
+ base::debug::ScopedThreadJoinActivity thread_activity(&thread_handle);
+
+ base::internal::ScopedBlockingCallWithBaseSyncPrimitives scoped_blocking_call(
+ FROM_HERE, base::BlockingType::MAY_BLOCK);
+
+ // Wait for the thread to exit. It should already have terminated but make
+ // sure this assumption is valid.
+ CHECK_EQ(WAIT_OBJECT_0,
+ WaitForSingleObject(thread_handle.platform_handle(), INFINITE));
+ CloseHandle(thread_handle.platform_handle());
+}
+
+// static
+void PlatformThread::Detach(PlatformThreadHandle thread_handle) {
+ CloseHandle(thread_handle.platform_handle());
+}
+
+// static
+bool PlatformThread::CanIncreaseThreadPriority(ThreadPriority priority) {
+ return true;
+}
+
+// static
+void PlatformThread::SetCurrentThreadPriorityImpl(ThreadPriority priority) {
+ PlatformThreadHandle::Handle thread_handle =
+ PlatformThread::CurrentHandle().platform_handle();
+
+ if (priority != ThreadPriority::BACKGROUND) {
+ // Exit background mode if the new priority is not BACKGROUND. This is a
+ // no-op if not in background mode.
+ ::SetThreadPriority(thread_handle, THREAD_MODE_BACKGROUND_END);
+ internal::AssertMemoryPriority(thread_handle, MEMORY_PRIORITY_NORMAL);
+ }
+
+ int desired_priority = THREAD_PRIORITY_ERROR_RETURN;
+ switch (priority) {
+ case ThreadPriority::BACKGROUND:
+ // Using THREAD_MODE_BACKGROUND_BEGIN instead of THREAD_PRIORITY_LOWEST
+ // improves input latency and navigation time. See
+ // https://docs.google.com/document/d/16XrOwuwTwKWdgPbcKKajTmNqtB4Am8TgS9GjbzBYLc0
+ //
+ // MSDN recommends THREAD_MODE_BACKGROUND_BEGIN for threads that perform
+ // background work, as it reduces disk and memory priority in addition to
+ // CPU priority.
+ desired_priority = THREAD_MODE_BACKGROUND_BEGIN;
+ break;
+ case ThreadPriority::NORMAL:
+ desired_priority = THREAD_PRIORITY_NORMAL;
+ break;
+ case ThreadPriority::DISPLAY:
+ desired_priority = THREAD_PRIORITY_ABOVE_NORMAL;
+ break;
+ case ThreadPriority::REALTIME_AUDIO:
+ desired_priority = THREAD_PRIORITY_TIME_CRITICAL;
+ break;
+ default:
+ NOTREACHED() << "Unknown priority.";
+ break;
+ }
+ DCHECK_NE(desired_priority, THREAD_PRIORITY_ERROR_RETURN);
+
+#if DCHECK_IS_ON()
+ const BOOL success =
+#endif
+ ::SetThreadPriority(thread_handle, desired_priority);
+ DPLOG_IF(ERROR, !success) << "Failed to set thread priority to "
+ << desired_priority;
+
+ if (priority == ThreadPriority::BACKGROUND) {
+ // In a background process, THREAD_MODE_BACKGROUND_BEGIN lowers the memory
+ // and I/O priorities but not the CPU priority (kernel bug?). Use
+ // THREAD_PRIORITY_LOWEST to also lower the CPU priority.
+ // https://crbug.com/901483
+ if (GetCurrentThreadPriority() != ThreadPriority::BACKGROUND) {
+ ::SetThreadPriority(thread_handle, THREAD_PRIORITY_LOWEST);
+ // Make sure that using THREAD_PRIORITY_LOWEST didn't affect the memory
+ // priority set by THREAD_MODE_BACKGROUND_BEGIN. There is no practical
+ // way to verify the I/O priority.
+ internal::AssertMemoryPriority(thread_handle, MEMORY_PRIORITY_VERY_LOW);
+ }
+ }
+
+ DCHECK_EQ(GetCurrentThreadPriority(), priority);
+}
+
+// static
+ThreadPriority PlatformThread::GetCurrentThreadPriority() {
+ static_assert(
+ THREAD_PRIORITY_IDLE < 0,
+ "THREAD_PRIORITY_IDLE is >= 0 and will incorrectly cause errors.");
+ static_assert(
+ THREAD_PRIORITY_LOWEST < 0,
+ "THREAD_PRIORITY_LOWEST is >= 0 and will incorrectly cause errors.");
+ static_assert(THREAD_PRIORITY_BELOW_NORMAL < 0,
+ "THREAD_PRIORITY_BELOW_NORMAL is >= 0 and will incorrectly "
+ "cause errors.");
+ static_assert(
+ THREAD_PRIORITY_NORMAL == 0,
+ "The logic below assumes that THREAD_PRIORITY_NORMAL is zero. If it is "
+ "not, ThreadPriority::BACKGROUND may be incorrectly detected.");
+ static_assert(THREAD_PRIORITY_ABOVE_NORMAL >= 0,
+ "THREAD_PRIORITY_ABOVE_NORMAL is < 0 and will incorrectly be "
+ "translated to ThreadPriority::BACKGROUND.");
+ static_assert(THREAD_PRIORITY_HIGHEST >= 0,
+ "THREAD_PRIORITY_HIGHEST is < 0 and will incorrectly be "
+ "translated to ThreadPriority::BACKGROUND.");
+ static_assert(THREAD_PRIORITY_TIME_CRITICAL >= 0,
+ "THREAD_PRIORITY_TIME_CRITICAL is < 0 and will incorrectly be "
+ "translated to ThreadPriority::BACKGROUND.");
+ static_assert(THREAD_PRIORITY_ERROR_RETURN >= 0,
+ "THREAD_PRIORITY_ERROR_RETURN is < 0 and will incorrectly be "
+ "translated to ThreadPriority::BACKGROUND.");
+
+ const int priority =
+ ::GetThreadPriority(PlatformThread::CurrentHandle().platform_handle());
+
+ // Negative values represent a background priority. We have observed -3, -4,
+ // -6 when THREAD_MODE_BACKGROUND_* is used. THREAD_PRIORITY_IDLE,
+ // THREAD_PRIORITY_LOWEST and THREAD_PRIORITY_BELOW_NORMAL are other possible
+ // negative values.
+ if (priority < THREAD_PRIORITY_NORMAL)
+ return ThreadPriority::BACKGROUND;
+
+ switch (priority) {
+ case kWin7BackgroundThreadModePriority:
+ DCHECK_EQ(win::GetVersion(), win::Version::WIN7);
+ return ThreadPriority::BACKGROUND;
+ case kWin7NormalPriority:
+ DCHECK_EQ(win::GetVersion(), win::Version::WIN7);
+ FALLTHROUGH;
+ case THREAD_PRIORITY_NORMAL:
+ return ThreadPriority::NORMAL;
+ case kWinNormalPriority1:
+ FALLTHROUGH;
+ case kWinNormalPriority2:
+ return ThreadPriority::NORMAL;
+ case THREAD_PRIORITY_ABOVE_NORMAL:
+ case THREAD_PRIORITY_HIGHEST:
+ return ThreadPriority::DISPLAY;
+ case THREAD_PRIORITY_TIME_CRITICAL:
+ return ThreadPriority::REALTIME_AUDIO;
+ case THREAD_PRIORITY_ERROR_RETURN:
+ DPCHECK(false) << "::GetThreadPriority error";
+ }
+
+ NOTREACHED() << "::GetThreadPriority returned " << priority << ".";
+ return ThreadPriority::NORMAL;
+}
+
+// static
+size_t PlatformThread::GetDefaultThreadStackSize() {
+ return 0;
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/threading/platform_thread_win.h b/security/sandbox/chromium/base/threading/platform_thread_win.h
new file mode 100644
index 0000000000..3e833178e3
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/platform_thread_win.h
@@ -0,0 +1,23 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_PLATFORM_THREAD_WIN_H_
+#define BASE_THREADING_PLATFORM_THREAD_WIN_H_
+
+#include "base/threading/platform_thread.h"
+
+#include "base/base_export.h"
+
+namespace base {
+namespace internal {
+
+// Assert that the memory priority of |thread| is |memory_priority|. No-op on
+// Windows 7 because ::GetThreadInformation() is not available. Exposed for unit
+// tests.
+BASE_EXPORT void AssertMemoryPriority(HANDLE thread, int memory_priority);
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_THREADING_PLATFORM_THREAD_WIN_H_
diff --git a/security/sandbox/chromium/base/threading/thread_checker_impl.h b/security/sandbox/chromium/base/threading/thread_checker_impl.h
new file mode 100644
index 0000000000..b325db6ae8
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_checker_impl.h
@@ -0,0 +1,74 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_CHECKER_IMPL_H_
+#define BASE_THREADING_THREAD_CHECKER_IMPL_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/sequence_token.h"
+#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+// Real implementation of ThreadChecker, for use in debug mode, or for temporary
+// use in release mode (e.g. to CHECK on a threading issue seen only in the
+// wild).
+//
+// Note: You should almost always use the ThreadChecker class to get the right
+// version for your build configuration.
+// Note: This is only a check, not a "lock". It is marked "LOCKABLE" only in
+// order to support thread_annotations.h.
+class LOCKABLE BASE_EXPORT ThreadCheckerImpl {
+ public:
+ ThreadCheckerImpl();
+ ~ThreadCheckerImpl();
+
+ // Allow move construct/assign. This must be called on |other|'s associated
+ // thread and assignment can only be made into a ThreadCheckerImpl which is
+ // detached or already associated with the current thread. This isn't
+ // thread-safe (|this| and |other| shouldn't be in use while this move is
+ // performed). If the assignment was legal, the resulting ThreadCheckerImpl
+ // will be bound to the current thread and |other| will be detached.
+ ThreadCheckerImpl(ThreadCheckerImpl&& other);
+ ThreadCheckerImpl& operator=(ThreadCheckerImpl&& other);
+
+ bool CalledOnValidThread() const WARN_UNUSED_RESULT;
+
+ // Changes the thread that is checked for in CalledOnValidThread. This may
+ // be useful when an object may be created on one thread and then used
+ // exclusively on another thread.
+ void DetachFromThread();
+
+ private:
+ void EnsureAssignedLockRequired() const EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // Members are mutable so that CalledOnValidThread() can set them.
+
+ // Synchronizes access to all members.
+ mutable base::Lock lock_;
+
+ // Thread on which CalledOnValidThread() may return true.
+ mutable PlatformThreadRef thread_id_ GUARDED_BY(lock_);
+
+ // TaskToken for which CalledOnValidThread() always returns true. This allows
+ // CalledOnValidThread() to return true when called multiple times from the
+ // same task, even if it's not running in a single-threaded context itself
+ // (allowing usage of ThreadChecker objects on the stack in the scope of one-
+ // off tasks). Note: CalledOnValidThread() may return true even if the current
+ // TaskToken is not equal to this.
+ mutable TaskToken task_token_ GUARDED_BY(lock_);
+
+ // SequenceToken for which CalledOnValidThread() may return true. Used to
+ // ensure that CalledOnValidThread() doesn't return true for ThreadPool
+ // tasks that happen to run on the same thread but weren't posted to the same
+ // SingleThreadTaskRunner.
+ mutable SequenceToken sequence_token_ GUARDED_BY(lock_);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_CHECKER_IMPL_H_
diff --git a/security/sandbox/chromium/base/threading/thread_collision_warner.cc b/security/sandbox/chromium/base/threading/thread_collision_warner.cc
new file mode 100644
index 0000000000..547e11ca66
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_collision_warner.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_collision_warner.h"
+
+#include "base/logging.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+void DCheckAsserter::warn() {
+ NOTREACHED() << "Thread Collision";
+}
+
+static subtle::Atomic32 CurrentThread() {
+ const PlatformThreadId current_thread_id = PlatformThread::CurrentId();
+ // We need to get the thread id into an atomic data type. This might be a
+  // truncating conversion, but any loss-of-information just increases the
+  // chance of a false negative, not a false positive.
+ const subtle::Atomic32 atomic_thread_id =
+ static_cast<subtle::Atomic32>(current_thread_id);
+
+ return atomic_thread_id;
+}
+
+void ThreadCollisionWarner::EnterSelf() {
+ // If the active thread is 0 then I'll write the current thread ID
+ // if two or more threads arrive here only one will succeed to
+ // write on valid_thread_id_ the current thread ID.
+ subtle::Atomic32 current_thread_id = CurrentThread();
+
+ int previous_value = subtle::NoBarrier_CompareAndSwap(&valid_thread_id_,
+ 0,
+ current_thread_id);
+ if (previous_value != 0 && previous_value != current_thread_id) {
+ // gotcha! a thread is trying to use the same class and that is
+ // not current thread.
+ asserter_->warn();
+ }
+
+ subtle::NoBarrier_AtomicIncrement(&counter_, 1);
+}
+
+void ThreadCollisionWarner::Enter() {
+ subtle::Atomic32 current_thread_id = CurrentThread();
+
+ if (subtle::NoBarrier_CompareAndSwap(&valid_thread_id_,
+ 0,
+ current_thread_id) != 0) {
+ // gotcha! another thread is trying to use the same class.
+ asserter_->warn();
+ }
+
+ subtle::NoBarrier_AtomicIncrement(&counter_, 1);
+}
+
+void ThreadCollisionWarner::Leave() {
+ if (subtle::Barrier_AtomicIncrement(&counter_, -1) == 0) {
+ subtle::NoBarrier_Store(&valid_thread_id_, 0);
+ }
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/threading/thread_collision_warner.h b/security/sandbox/chromium/base/threading/thread_collision_warner.h
new file mode 100644
index 0000000000..7f7443b21d
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_collision_warner.h
@@ -0,0 +1,252 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_COLLISION_WARNER_H_
+#define BASE_THREADING_THREAD_COLLISION_WARNER_H_
+
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+
+// A helper class alongside macros to be used to verify assumptions about thread
+// safety of a class.
+//
+// Example: Queue implementation non thread-safe but still usable if clients
+// are synchronized somehow.
+//
// In this case the macro DFAKE_SCOPED_LOCK has to be used: it checks
// that if a thread is inside the push/pop, no one else is still inside
// the pop/push.
+//
+// class NonThreadSafeQueue {
+// public:
+// ...
+// void push(int) { DFAKE_SCOPED_LOCK(push_pop_); ... }
+// int pop() { DFAKE_SCOPED_LOCK(push_pop_); ... }
+// ...
+// private:
+// DFAKE_MUTEX(push_pop_);
+// };
+//
+//
+// Example: Queue implementation non thread-safe but still usable if clients
+// are synchronized somehow, it calls a method to "protect" from
+// a "protected" method
+//
// In this case the macro DFAKE_SCOPED_RECURSIVE_LOCK has to be used: it
// checks that if a thread is inside the push/pop, no one else is still
// inside the pop/push.
+//
+// class NonThreadSafeQueue {
+// public:
+// void push(int) {
+// DFAKE_SCOPED_LOCK(push_pop_);
+// ...
+// }
+// int pop() {
+// DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_);
+// bar();
+// ...
+// }
+// void bar() { DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_); ... }
+// ...
+// private:
+// DFAKE_MUTEX(push_pop_);
+// };
+//
+//
+// Example: Queue implementation not usable even if clients are synchronized,
+// so only one thread in the class life cycle can use the two members
+// push/pop.
+//
// In this case the macro DFAKE_SCOPED_LOCK_THREAD_LOCKED pins the
// specified critical section the first time a thread enters push or pop;
// from that point on, only that thread is allowed to execute push or pop.
+//
+// class NonThreadSafeQueue {
+// public:
+// ...
+// void push(int) { DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_); ... }
+// int pop() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_); ... }
+// ...
+// private:
+// DFAKE_MUTEX(push_pop_);
+// };
+//
+//
// Example: Class that has to be constructed/destroyed on the same thread, it
//          has a "shareable" method (with external synchronization) and a not
//          shareable method (even with external synchronization).
+//
+// In this case 3 Critical sections have to be defined
+//
+// class ExoticClass {
+// public:
+// ExoticClass() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
+// ~ExoticClass() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
+//
+// void Shareable() { DFAKE_SCOPED_LOCK(shareable_section_); ... }
+// void NotShareable() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
+// ...
+// private:
+// DFAKE_MUTEX(ctor_dtor_);
+// DFAKE_MUTEX(shareable_section_);
+// };
+
+
#if !defined(NDEBUG)

#define DFAKE_UNIQUE_VARIABLE_CONCAT(a, b) a##b
// CONCAT1 provides extra level of indirection so that __LINE__ macro expands.
#define DFAKE_UNIQUE_VARIABLE_CONCAT1(a, b) DFAKE_UNIQUE_VARIABLE_CONCAT(a, b)
// Produces an identifier unique within a file, e.g. s_check_42.
#define DFAKE_UNIQUE_VARIABLE_NAME(a) DFAKE_UNIQUE_VARIABLE_CONCAT1(a, __LINE__)

// Defines a class member that acts like a mutex. It is used only as a
// verification tool.
#define DFAKE_MUTEX(obj) \
  mutable base::ThreadCollisionWarner obj
// Asserts the call is never called simultaneously in two threads. Used at
// member function scope.
#define DFAKE_SCOPED_LOCK(obj) \
  base::ThreadCollisionWarner::ScopedCheck DFAKE_UNIQUE_VARIABLE_NAME( \
      s_check_)(&obj)
// Asserts the call is never called simultaneously in two threads. Used at
// member function scope. Same as DFAKE_SCOPED_LOCK but allows recursive locks.
#define DFAKE_SCOPED_RECURSIVE_LOCK(obj) \
  base::ThreadCollisionWarner::ScopedRecursiveCheck \
      DFAKE_UNIQUE_VARIABLE_NAME(sr_check)(&obj)
// Asserts the code is always executed in the same thread.
#define DFAKE_SCOPED_LOCK_THREAD_LOCKED(obj) \
  base::ThreadCollisionWarner::Check DFAKE_UNIQUE_VARIABLE_NAME(check_)(&obj)

#else

// In release builds (NDEBUG) every macro compiles away to nothing.
#define DFAKE_MUTEX(obj) typedef void InternalFakeMutexType##obj
#define DFAKE_SCOPED_LOCK(obj) ((void)0)
#define DFAKE_SCOPED_RECURSIVE_LOCK(obj) ((void)0)
#define DFAKE_SCOPED_LOCK_THREAD_LOCKED(obj) ((void)0)

#endif
+
+namespace base {
+
// The class ThreadCollisionWarner uses an Asserter to report collisions.
// AsserterBase is the interface and DCheckAsserter is the default asserter
// used. Unit tests use a different asserter class that does not "DCHECK" on
// a collision (see thread_collision_warner_unittests.cc).
// Interface through which ThreadCollisionWarner reports a detected collision.
struct BASE_EXPORT AsserterBase {
  virtual ~AsserterBase() = default;
  // Invoked when two threads are detected inside the same critical section.
  virtual void warn() = 0;
};
+
// Default asserter: warn() fails a NOTREACHED() assertion (see the .cc file).
struct BASE_EXPORT DCheckAsserter : public AsserterBase {
  ~DCheckAsserter() override = default;
  void warn() override;
};
+
// A verification-only stand-in for a mutex: it never blocks, it only detects
// (via its AsserterBase) that two threads are inside the same "critical
// section" at the same time. Use through the DFAKE_* macros above.
class BASE_EXPORT ThreadCollisionWarner {
 public:
  // The parameter asserter is there only for test purpose
  explicit ThreadCollisionWarner(AsserterBase* asserter = new DCheckAsserter())
      : valid_thread_id_(0),
        counter_(0),
        asserter_(asserter) {}

  // Owns |asserter_| and deletes it here.
  ~ThreadCollisionWarner() {
    delete asserter_;
  }

  // This class is meant to be used through the macro
  // DFAKE_SCOPED_LOCK_THREAD_LOCKED.
  // It doesn't leave the critical section, as opposed to ScopedCheck,
  // because the critical section being pinned is allowed to be used only
  // from one thread.
  class BASE_EXPORT Check {
   public:
    explicit Check(ThreadCollisionWarner* warner)
        : warner_(warner) {
      warner_->EnterSelf();
    }

    // Intentionally does NOT call Leave(): the pin is permanent.
    ~Check() = default;

   private:
    ThreadCollisionWarner* warner_;

    DISALLOW_COPY_AND_ASSIGN(Check);
  };

  // This class is meant to be used through the macro
  // DFAKE_SCOPED_LOCK. Enters on construction, leaves on destruction.
  class BASE_EXPORT ScopedCheck {
   public:
    explicit ScopedCheck(ThreadCollisionWarner* warner)
        : warner_(warner) {
      warner_->Enter();
    }

    ~ScopedCheck() {
      warner_->Leave();
    }

   private:
    ThreadCollisionWarner* warner_;

    DISALLOW_COPY_AND_ASSIGN(ScopedCheck);
  };

  // This class is meant to be used through the macro
  // DFAKE_SCOPED_RECURSIVE_LOCK. Like ScopedCheck, but the same thread may
  // re-enter recursively (EnterSelf instead of Enter).
  class BASE_EXPORT ScopedRecursiveCheck {
   public:
    explicit ScopedRecursiveCheck(ThreadCollisionWarner* warner)
        : warner_(warner) {
      warner_->EnterSelf();
    }

    ~ScopedRecursiveCheck() {
      warner_->Leave();
    }

   private:
    ThreadCollisionWarner* warner_;

    DISALLOW_COPY_AND_ASSIGN(ScopedRecursiveCheck);
  };

 private:
  // This method stores the current thread identifier and does a DCHECK
  // if another thread has already done it; it is safe if the same thread
  // calls this multiple times (recursion allowed).
  void EnterSelf();

  // Same as EnterSelf but recursion is not allowed.
  void Enter();

  // Removes the thread_id stored in order to allow other threads to
  // call EnterSelf or Enter.
  void Leave();

  // This stores the thread id that is inside the critical section; if the
  // value is 0 then no thread is inside.
  volatile subtle::Atomic32 valid_thread_id_;

  // Counts how many times a critical section was "pinned" (when allowed)
  // in order to unpin it when counter_ reaches 0.
  volatile subtle::Atomic32 counter_;

  // Here only for unit-test purposes: during tests the collision must be
  // reported with something other than a DCHECK. Owned by this object.
  AsserterBase* asserter_;

  DISALLOW_COPY_AND_ASSIGN(ThreadCollisionWarner);
};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_COLLISION_WARNER_H_
diff --git a/security/sandbox/chromium/base/threading/thread_id_name_manager.cc b/security/sandbox/chromium/base/threading/thread_id_name_manager.cc
new file mode 100644
index 0000000000..ba2f9b41cb
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_id_name_manager.cc
@@ -0,0 +1,147 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_id_name_manager.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "base/logging.h"
+#include "base/memory/singleton.h"
+#include "base/no_destructor.h"
+#include "base/stl_util.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_local.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+
+namespace base {
+namespace {
+
+static const char kDefaultName[] = "";
+static std::string* g_default_name;
+
+ThreadLocalStorage::Slot& GetThreadNameTLS() {
+ static base::NoDestructor<base::ThreadLocalStorage::Slot> thread_name_tls;
+ return *thread_name_tls;
+}
+}
+
// Out-of-line definition of Observer's virtual destructor.
ThreadIdNameManager::Observer::~Observer() = default;
+
// Constructed lazily via GetInstance(). Allocates the leaked interned copy
// of the default name before touching the map under |lock_|.
ThreadIdNameManager::ThreadIdNameManager()
    : main_process_name_(nullptr), main_process_id_(kInvalidThreadId) {
  g_default_name = new std::string(kDefaultName);

  AutoLock locked(lock_);
  name_to_interned_name_[kDefaultName] = g_default_name;
}
+
// Not expected to run in practice: the singleton uses LeakySingletonTraits.
ThreadIdNameManager::~ThreadIdNameManager() = default;
+
// Returns the process-wide instance. Leaky traits keep it alive (and usable)
// through process shutdown.
ThreadIdNameManager* ThreadIdNameManager::GetInstance() {
  return Singleton<ThreadIdNameManager,
      LeakySingletonTraits<ThreadIdNameManager> >::get();
}
+
// Returns the interned default ("") name.
// NOTE(review): assumes GetInstance() has run at least once so g_default_name
// is non-null — confirm against callers.
const char* ThreadIdNameManager::GetDefaultInternedString() {
  return g_default_name->c_str();
}
+
// Registers the |id| -> |handle| mapping; the thread starts out with the
// interned default (empty) name. Overwrites any stale entry for |id|.
void ThreadIdNameManager::RegisterThread(PlatformThreadHandle::Handle handle,
                                         PlatformThreadId id) {
  AutoLock locked(lock_);
  thread_id_to_handle_[id] = handle;
  thread_handle_to_interned_name_[handle] =
      name_to_interned_name_[kDefaultName];
}
+
// Registers |obs| for name-change notifications. Each observer may only be
// registered once.
void ThreadIdNameManager::AddObserver(Observer* obs) {
  AutoLock locked(lock_);
  DCHECK(!base::Contains(observers_, obs));
  observers_.push_back(obs);
}
+
// Unregisters |obs|; it must currently be registered.
void ThreadIdNameManager::RemoveObserver(Observer* obs) {
  AutoLock locked(lock_);
  DCHECK(base::Contains(observers_, obs));
  base::Erase(observers_, obs);
}
+
// Sets the calling thread's name. The name is interned (leaked std::string)
// so returned pointers stay valid for the process lifetime.
void ThreadIdNameManager::SetName(const std::string& name) {
  PlatformThreadId id = PlatformThread::CurrentId();
  std::string* leaked_str = nullptr;
  {
    AutoLock locked(lock_);
    // Intern |name|: reuse an existing leaked copy or create (and leak) one.
    auto iter = name_to_interned_name_.find(name);
    if (iter != name_to_interned_name_.end()) {
      leaked_str = iter->second;
    } else {
      leaked_str = new std::string(name);
      name_to_interned_name_[name] = leaked_str;
    }

    auto id_to_handle_iter = thread_id_to_handle_.find(id);

    // Cache in TLS for the lock-free GetNameForCurrentThread() fast path.
    GetThreadNameTLS().Set(const_cast<char*>(leaked_str->c_str()));
    // Observers are notified with |lock_| held; per the Observer contract
    // they must not call back into this class.
    for (Observer* obs : observers_)
      obs->OnThreadNameChanged(leaked_str->c_str());

    // The main thread of a process will not be created as a Thread object which
    // means there is no PlatformThreadHandler registered.
    if (id_to_handle_iter == thread_id_to_handle_.end()) {
      main_process_name_ = leaked_str;
      main_process_id_ = id;
      // NOTE: returning here also skips the AllocationContextTracker update
      // below for the main thread.
      return;
    }
    thread_handle_to_interned_name_[id_to_handle_iter->second] = leaked_str;
  }

  // Add the leaked thread name to heap profiler context tracker. The name added
  // is valid for the lifetime of the process. AllocationContextTracker cannot
  // call GetName(which holds a lock) during the first allocation because it can
  // cause a deadlock when the first allocation happens in the
  // ThreadIdNameManager itself when holding the lock.
  trace_event::AllocationContextTracker::SetCurrentThreadName(
      leaked_str->c_str());
}
+
// Returns the interned name for thread |id|; the pointer remains valid for
// the process lifetime. Unknown ids report the default (empty) name.
const char* ThreadIdNameManager::GetName(PlatformThreadId id) {
  AutoLock locked(lock_);

  // The main thread has no registered handle; it is tracked via the
  // dedicated main_process_* fields set in SetName().
  if (id == main_process_id_)
    return main_process_name_->c_str();

  auto id_to_handle_iter = thread_id_to_handle_.find(id);
  if (id_to_handle_iter == thread_id_to_handle_.end())
    return name_to_interned_name_[kDefaultName]->c_str();

  auto handle_to_name_iter =
      thread_handle_to_interned_name_.find(id_to_handle_iter->second);
  return handle_to_name_iter->second->c_str();
}
+
+const char* ThreadIdNameManager::GetNameForCurrentThread() {
+ const char* name = reinterpret_cast<const char*>(GetThreadNameTLS().Get());
+ return name ? name : kDefaultName;
+}
+
+void ThreadIdNameManager::RemoveName(PlatformThreadHandle::Handle handle,
+ PlatformThreadId id) {
+ AutoLock locked(lock_);
+ auto handle_to_name_iter = thread_handle_to_interned_name_.find(handle);
+
+ DCHECK(handle_to_name_iter != thread_handle_to_interned_name_.end());
+ thread_handle_to_interned_name_.erase(handle_to_name_iter);
+
+ auto id_to_handle_iter = thread_id_to_handle_.find(id);
+ DCHECK((id_to_handle_iter!= thread_id_to_handle_.end()));
+ // The given |id| may have been re-used by the system. Make sure the
+ // mapping points to the provided |handle| before removal.
+ if (id_to_handle_iter->second != handle)
+ return;
+
+ thread_id_to_handle_.erase(id_to_handle_iter);
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/threading/thread_id_name_manager.h b/security/sandbox/chromium/base/threading/thread_id_name_manager.h
new file mode 100644
index 0000000000..e413da5d03
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_id_name_manager.h
@@ -0,0 +1,94 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_ID_NAME_MANAGER_H_
+#define BASE_THREADING_THREAD_ID_NAME_MANAGER_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/observer_list.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+template <typename T>
+struct DefaultSingletonTraits;
+
// Tracks a human-readable name for each thread in the process, keyed by
// thread id or platform handle. Names are interned, intentionally-leaked
// std::strings, so every const char* handed out stays valid for the process
// lifetime.
class BASE_EXPORT ThreadIdNameManager {
 public:
  static ThreadIdNameManager* GetInstance();

  // Returns the interned default ("") name string.
  static const char* GetDefaultInternedString();

  class BASE_EXPORT Observer {
   public:
    virtual ~Observer();

    // Called on the thread whose name is changing, immediately after the name
    // is set. |name| is a pointer to a C string that is guaranteed to remain
    // valid for the duration of the process.
    //
    // NOTE: Will be called while ThreadIdNameManager's lock is held, so don't
    // call back into it.
    virtual void OnThreadNameChanged(const char* name) = 0;
  };

  // Register the mapping between a thread |id| and |handle|.
  void RegisterThread(PlatformThreadHandle::Handle handle, PlatformThreadId id);

  void AddObserver(Observer*);
  void RemoveObserver(Observer*);

  // Set the name for the current thread.
  void SetName(const std::string& name);

  // Get the name for the given id.
  const char* GetName(PlatformThreadId id);

  // Unlike |GetName|, this method uses TLS and avoids touching |lock_|.
  const char* GetNameForCurrentThread();

  // Remove the name for the given id.
  void RemoveName(PlatformThreadHandle::Handle handle, PlatformThreadId id);

 private:
  friend struct DefaultSingletonTraits<ThreadIdNameManager>;

  typedef std::map<PlatformThreadId, PlatformThreadHandle::Handle>
      ThreadIdToHandleMap;
  typedef std::map<PlatformThreadHandle::Handle, std::string*>
      ThreadHandleToInternedNameMap;
  typedef std::map<std::string, std::string*> NameToInternedNameMap;

  ThreadIdNameManager();
  ~ThreadIdNameManager();

  // lock_ protects the name_to_interned_name_, thread_id_to_handle_ and
  // thread_handle_to_interned_name_ maps.
  Lock lock_;

  NameToInternedNameMap name_to_interned_name_;
  ThreadIdToHandleMap thread_id_to_handle_;
  ThreadHandleToInternedNameMap thread_handle_to_interned_name_;

  // Treat the main process specially as there is no PlatformThreadHandle.
  std::string* main_process_name_;
  PlatformThreadId main_process_id_;

  // There's no point using a base::ObserverList behind a lock, so we just use
  // an std::vector instead.
  std::vector<Observer*> observers_;

  DISALLOW_COPY_AND_ASSIGN(ThreadIdNameManager);
};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_ID_NAME_MANAGER_H_
diff --git a/security/sandbox/chromium/base/threading/thread_local.h b/security/sandbox/chromium/base/threading/thread_local.h
new file mode 100644
index 0000000000..162b10bf0f
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_local.h
@@ -0,0 +1,136 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: Thread local storage is a bit tricky to get right. Please make sure
+// that this is really the proper solution for what you're trying to achieve.
+// Don't prematurely optimize, most likely you can just use a Lock.
+//
+// These classes implement a wrapper around ThreadLocalStorage::Slot. On
+// construction, they will allocate a TLS slot, and free the TLS slot on
+// destruction. No memory management (creation or destruction) is handled. This
+// means for uses of ThreadLocalPointer, you must correctly manage the memory
+// yourself, these classes will not destroy the pointer for you. There are no
+// at-thread-exit actions taken by these classes.
+//
+// ThreadLocalPointer<Type> wraps a Type*. It performs no creation or
+// destruction, so memory management must be handled elsewhere. The first call
+// to Get() on a thread will return NULL. You can update the pointer with a call
+// to Set().
+//
+// ThreadLocalBoolean wraps a bool. It will default to false if it has never
+// been set otherwise with Set().
+//
+// Thread Safety: An instance of ThreadLocalStorage is completely thread safe
+// once it has been created. If you want to dynamically create an instance, you
+// must of course properly deal with safety and race conditions.
+//
+// In Android, the system TLS is limited.
+//
+// Example usage:
+// // My class is logically attached to a single thread. We cache a pointer
+// // on the thread it was created on, so we can implement current().
+// MyClass::MyClass() {
+// DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() == NULL);
+// Singleton<ThreadLocalPointer<MyClass> >::get()->Set(this);
+// }
+//
+// MyClass::~MyClass() {
+// DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() != NULL);
+// Singleton<ThreadLocalPointer<MyClass> >::get()->Set(NULL);
+// }
+//
+// // Return the current MyClass associated with the calling thread, can be
+// // NULL if there isn't a MyClass associated.
+// MyClass* MyClass::current() {
+// return Singleton<ThreadLocalPointer<MyClass> >::get()->Get();
+// }
+
+#ifndef BASE_THREADING_THREAD_LOCAL_H_
+#define BASE_THREADING_THREAD_LOCAL_H_
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/threading/thread_local_internal.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+
// Wraps a per-thread T*. Performs no memory management: the first Get() on a
// thread returns null, and destruction does not delete stored pointers (see
// the file-level comment).
template <typename T>
class ThreadLocalPointer {
 public:
  ThreadLocalPointer() = default;
  ~ThreadLocalPointer() = default;

  // Returns the pointer most recently Set() on the calling thread, or null.
  T* Get() const { return static_cast<T*>(slot_.Get()); }

  // Stores |ptr| for the calling thread only. The double cast allows T
  // itself to be const-qualified: T* -> const void* -> void*.
  void Set(T* ptr) {
    slot_.Set(const_cast<void*>(static_cast<const void*>(ptr)));
  }

 private:
  ThreadLocalStorage::Slot slot_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalPointer);
};
+
+// A ThreadLocalOwnedPointer<T> is like a ThreadLocalPointer<T> except that
+// pointers handed to it are owned and automatically deleted during their
+// associated thread's exit phase (or when replaced if Set() is invoked multiple
+// times on the same thread).
+// The ThreadLocalOwnedPointer instance itself can only be destroyed when no
+// threads, other than the one it is destroyed on, have remaining state set in
+// it. Typically this means that ThreadLocalOwnedPointer instances are held in
+// static storage or at the very least only recycled in the single-threaded
+// phase between tests in the same process.
+#if DCHECK_IS_ON()
+template <typename T>
+using ThreadLocalOwnedPointer = internal::CheckedThreadLocalOwnedPointer<T>;
+#else // DCHECK_IS_ON()
// Non-DCHECK implementation: owns the per-thread pointers and deletes them on
// thread exit (via DeleteTlsPtr) or when replaced by Set().
template <typename T>
class ThreadLocalOwnedPointer {
 public:
  ThreadLocalOwnedPointer() = default;

  ~ThreadLocalOwnedPointer() {
    // Assume that this thread is the only one with potential state left. This
    // is verified in ~CheckedThreadLocalOwnedPointer().
    Set(nullptr);
  }

  // Returns the pointer owned on behalf of the calling thread, or null.
  T* Get() const { return static_cast<T*>(slot_.Get()); }

  // Takes ownership of |ptr| for the calling thread, deleting any pointer
  // previously held for this thread.
  void Set(std::unique_ptr<T> ptr) {
    delete Get();
    // The double cast allows T itself to be const-qualified.
    slot_.Set(const_cast<void*>(static_cast<const void*>(ptr.release())));
  }

 private:
  // Invoked by the slot machinery to destroy a departing thread's value.
  static void DeleteTlsPtr(void* ptr) { delete static_cast<T*>(ptr); }

  ThreadLocalStorage::Slot slot_{&DeleteTlsPtr};

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalOwnedPointer);
};
+#endif // DCHECK_IS_ON()
+
// Per-thread bool, defaulting to false. The value is encoded as a pointer in
// the underlying slot: non-null (arbitrarily, |this|) means true, null means
// false — so no allocation or cleanup is ever needed.
class ThreadLocalBoolean {
 public:
  ThreadLocalBoolean() = default;
  ~ThreadLocalBoolean() = default;

  bool Get() const { return tlp_.Get() != nullptr; }

  void Set(bool val) { tlp_.Set(val ? this : nullptr); }

 private:
  ThreadLocalPointer<void> tlp_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalBoolean);
};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_LOCAL_H_
diff --git a/security/sandbox/chromium/base/threading/thread_local_internal.h b/security/sandbox/chromium/base/threading/thread_local_internal.h
new file mode 100644
index 0000000000..bd2bea0636
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_local_internal.h
@@ -0,0 +1,80 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_LOCAL_INTERNAL_H_
+#define BASE_THREADING_THREAD_LOCAL_INTERNAL_H_
+
+#if DCHECK_IS_ON()
+
+#include <atomic>
+#include <memory>
+
+#include "base/macros.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+namespace internal {
+
+// A version of ThreadLocalOwnedPointer which verifies that it's only destroyed
+// when no threads, other than the one it is destroyed on, have remaining state
+// set in it. A ThreadLocalOwnedPointer instance being destroyed too early would
+// result in leaks per unregistering the TLS slot (and thus the DeleteTlsPtr
+// hook).
// DCHECK-enabled version of ThreadLocalOwnedPointer: additionally counts how
// many threads currently have state assigned so that premature destruction
// (which would leak via the unregistered DeleteTlsPtr hook) is caught.
template <typename T>
class CheckedThreadLocalOwnedPointer {
 public:
  CheckedThreadLocalOwnedPointer() = default;

  ~CheckedThreadLocalOwnedPointer() {
    // Release this thread's own state first; any remaining count then
    // belongs to other threads and is a bug.
    Set(nullptr);

    DCHECK_EQ(num_assigned_threads_.load(std::memory_order_relaxed), 0)
        << "Memory leak: Must join all threads or release all associated "
           "thread-local slots before ~ThreadLocalOwnedPointer";
  }

  // Returns the pointer held for the calling thread, or null.
  T* Get() const {
    PtrTracker* const ptr_tracker = static_cast<PtrTracker*>(slot_.Get());
    return ptr_tracker ? ptr_tracker->ptr_.get() : nullptr;
  }

  // Takes ownership of |ptr| for the calling thread, destroying any value
  // previously held for this thread.
  void Set(std::unique_ptr<T> ptr) {
    delete static_cast<PtrTracker*>(slot_.Get());
    if (ptr)
      slot_.Set(new PtrTracker(this, std::move(ptr)));
    else
      slot_.Set(nullptr);
  }

 private:
  // Owns one thread's value and maintains the owning instance's count of
  // threads with assigned state (incremented/decremented in ctor/dtor).
  struct PtrTracker {
   public:
    PtrTracker(CheckedThreadLocalOwnedPointer<T>* outer, std::unique_ptr<T> ptr)
        : outer_(outer), ptr_(std::move(ptr)) {
      outer_->num_assigned_threads_.fetch_add(1, std::memory_order_relaxed);
    }

    ~PtrTracker() {
      outer_->num_assigned_threads_.fetch_sub(1, std::memory_order_relaxed);
    }

    CheckedThreadLocalOwnedPointer<T>* const outer_;
    const std::unique_ptr<T> ptr_;
  };

  // Invoked by the slot machinery to destroy a departing thread's tracker.
  static void DeleteTlsPtr(void* ptr) { delete static_cast<PtrTracker*>(ptr); }

  ThreadLocalStorage::Slot slot_{&DeleteTlsPtr};

  // Number of threads that currently have a non-null value Set() here.
  std::atomic_int num_assigned_threads_{0};

  DISALLOW_COPY_AND_ASSIGN(CheckedThreadLocalOwnedPointer);
};
+
+} // namespace internal
+} // namespace base
+
+#endif // DCHECK_IS_ON()
+
+#endif // BASE_THREADING_THREAD_LOCAL_INTERNAL_H_
diff --git a/security/sandbox/chromium/base/threading/thread_local_storage.cc b/security/sandbox/chromium/base/threading/thread_local_storage.cc
new file mode 100644
index 0000000000..204f34c272
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_local_storage.cc
@@ -0,0 +1,461 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_local_storage.h"
+
+#include "base/atomicops.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/no_destructor.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+using base::internal::PlatformThreadLocalStorage;
+
+// Chrome Thread Local Storage (TLS)
+//
+// This TLS system allows Chrome to use a single OS level TLS slot process-wide,
+// and allows us to control the slot limits instead of being at the mercy of the
+// platform. To do this, Chrome TLS replicates an array commonly found in the OS
+// thread metadata.
+//
+// Overview:
+//
+// OS TLS Slots Per-Thread Per-Process Global
+// ...
+// [] Chrome TLS Array Chrome TLS Metadata
+// [] ----------> [][][][][ ][][][][] [][][][][ ][][][][]
+// [] | |
+// ... V V
+// Metadata Version Slot Information
+// Your Data!
+//
+// Using a single OS TLS slot, Chrome TLS allocates an array on demand for the
+// lifetime of each thread that requests Chrome TLS data. Each per-thread TLS
+// array matches the length of the per-process global metadata array.
+//
+// A per-process global TLS metadata array tracks information about each item in
+// the per-thread array:
+// * Status: Tracks if the slot is allocated or free to assign.
+// * Destructor: An optional destructor to call on thread destruction for that
+// specific slot.
+// * Version: Tracks the current version of the TLS slot. Each TLS slot
+// allocation is associated with a unique version number.
+//
+// Most OS TLS APIs guarantee that a newly allocated TLS slot is
+// initialized to 0 for all threads. The Chrome TLS system provides
+// this guarantee by tracking the version for each TLS slot here
+// on each per-thread Chrome TLS array entry. Threads that access
+// a slot with a mismatched version will receive 0 as their value.
+// The metadata version is incremented when the client frees a
+// slot. The per-thread metadata version is updated when a client
+// writes to the slot. This scheme allows for constant time
+// invalidation and avoids the need to iterate through each Chrome
+// TLS array to mark the slot as zero.
+//
+// Just like an OS TLS API, clients of the Chrome TLS are responsible for
+// managing any necessary lifetime of the data in their slots. The only
+// convenience provided is automatic destruction when a thread ends. If a client
+// frees a slot, that client is responsible for destroying the data in the slot.
+
+namespace {
+// In order to make TLS destructors work, we need to keep around a function
+// pointer to the destructor for each slot. We keep this array of pointers in a
+// global (static) array.
+// We use the single OS-level TLS slot (giving us one pointer per thread) to
+// hold a pointer to a per-thread array (table) of slots that we allocate to
+// Chromium consumers.
+
// g_native_tls_key is the one native TLS that we use. It stores our table.
// Starts out invalid; ConstructTlsVector() allocates the real key on first
// use and publishes it with NoBarrier_CompareAndSwap.
base::subtle::Atomic32 g_native_tls_key =
    PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES;
+
+// The OS TLS slot has the following states. The TLS slot's lower 2 bits contain
+// the state, the upper bits the TlsVectorEntry*.
+// * kUninitialized: Any call to Slot::Get()/Set() will create the base
+// per-thread TLS state. kUninitialized must be null.
+// * kInUse: value has been created and is in use.
+// * kDestroying: Set when the thread is exiting prior to deleting any of the
+// values stored in the TlsVectorEntry*. This state is necessary so that
+// sequence/task checks won't be done while in the process of deleting the
+// tls entries (see comments in SequenceCheckerImpl for more details).
+// * kDestroyed: All of the values in the vector have been deallocated and
+// the TlsVectorEntry has been deleted.
+//
+// Final States:
+// * Windows: kDestroyed. Windows does not iterate through the OS TLS to clean
+// up the values.
+// * POSIX: kUninitialized. POSIX iterates through TLS until all slots contain
+// nullptr.
+//
+// More details on this design:
+// We need some type of thread-local state to indicate that the TLS system has
+// been destroyed. To do so, we leverage the multi-pass nature of destruction
+// of pthread_key.
+//
+// a) After destruction of TLS system, we set the pthread_key to a sentinel
+// kDestroyed.
+// b) All calls to Slot::Get() DCHECK that the state is not kDestroyed, and
+// any system which might potentially invoke Slot::Get() after destruction
+// of TLS must check ThreadLocalStorage::ThreadIsBeingDestroyed().
+// c) After a full pass of the pthread_keys, on the next invocation of
+// ConstructTlsVector(), we'll then set the key to nullptr.
+// d) At this stage, the TLS system is back in its uninitialized state.
+// e) If in the second pass of destruction of pthread_keys something were to
+// re-initialize TLS [this should never happen! Since the only code which
+// uses Chrome TLS is Chrome controlled, we should really be striving for
+// single-pass destruction], then TLS will be re-initialized and then go
+// through the 2-pass destruction system again. Everything should just
+// work (TM).
+
// The state of the tls-entry.
enum class TlsVectorState {
  // No vector allocated for this thread yet. Must be 0 so a never-touched OS
  // slot (which reads back as null) decodes as kUninitialized — see the
  // static_assert below.
  kUninitialized = 0,

  // In the process of destroying the entries in the vector.
  kDestroying,

  // All of the entries and the vector has been destroyed.
  kDestroyed,

  // The vector has been initialized and is in use.
  kInUse,

  kMaxValue = kInUse
};
+
// Bit-mask used to store TlsVectorState (in the two low-order bits of the OS
// TLS value; see SetTlsVectorValue).
constexpr uintptr_t kVectorStateBitMask = 3;
static_assert(static_cast<int>(TlsVectorState::kMaxValue) <=
                  kVectorStateBitMask,
              "number of states must fit in header");
static_assert(static_cast<int>(TlsVectorState::kUninitialized) == 0,
              "kUninitialized must be null");

// The maximum number of slots in our thread local storage stack.
constexpr int kThreadLocalStorageSize = 256;

// Allocation state of one Chrome TLS slot, stored in TlsMetadata.
enum TlsStatus {
  FREE,
  IN_USE,
};

// Per-process bookkeeping for one Chrome TLS slot.
struct TlsMetadata {
  TlsStatus status;
  // Optional destructor to run on a slot's value at thread destruction.
  base::ThreadLocalStorage::TLSDestructorFunc destructor;
  // Incremented every time a slot is reused. Used to detect reuse of slots.
  uint32_t version;
};

// Per-thread entry: the stored value plus the slot version it was written
// under. Per the file-level comment, a version mismatch with g_tls_metadata
// means the slot was freed and the read yields 0.
struct TlsVectorEntry {
  void* data;
  uint32_t version;
};
+
+// This lock isn't needed until after we've constructed the per-thread TLS
+// vector, so it's safe to use.
+base::Lock* GetTLSMetadataLock() {
+ static auto* lock = new base::Lock();
+ return lock;
+}
// Process-wide slot metadata. Zero-initialized, so every slot starts out
// FREE (== 0) with version 0.
TlsMetadata g_tls_metadata[kThreadLocalStorageSize];
// Starting point for the next slot search; the name suggests it rotates to
// spread reuse. NOTE(review): presumably guarded by GetTLSMetadataLock() —
// confirm at the allocation sites (not visible in this chunk).
size_t g_last_assigned_slot = 0;

// The maximum number of times to try to clear slots by calling destructors.
// Use pthread naming convention for clarity.
constexpr int kMaxDestructorIterations = kThreadLocalStorageSize;
+
// Sets the value and state of the vector. The two low-order bits of the OS
// TLS value encode |state|; the remaining bits hold the TlsVectorEntry*
// (which must therefore be at least 4-byte aligned, per kVectorStateBitMask).
void SetTlsVectorValue(PlatformThreadLocalStorage::TLSKey key,
                       TlsVectorEntry* tls_data,
                       TlsVectorState state) {
  // A null vector is only valid in the uninitialized/destroyed states.
  DCHECK(tls_data || (state == TlsVectorState::kUninitialized) ||
         (state == TlsVectorState::kDestroyed));
  PlatformThreadLocalStorage::SetTLSValue(
      key, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(tls_data) |
                                   static_cast<uintptr_t>(state)));
}
+
+// Returns the tls vector and current state from the raw tls value.
+TlsVectorState GetTlsVectorStateAndValue(void* tls_value,
+ TlsVectorEntry** entry = nullptr) {
+ if (entry) {
+ *entry = reinterpret_cast<TlsVectorEntry*>(
+ reinterpret_cast<uintptr_t>(tls_value) & ~kVectorStateBitMask);
+ }
+ return static_cast<TlsVectorState>(reinterpret_cast<uintptr_t>(tls_value) &
+ kVectorStateBitMask);
+}
+
// Returns the tls vector and state using the tls key. Convenience overload
// that first reads the raw value out of the OS slot for |key|.
TlsVectorState GetTlsVectorStateAndValue(PlatformThreadLocalStorage::TLSKey key,
                                         TlsVectorEntry** entry = nullptr) {
  return GetTlsVectorStateAndValue(PlatformThreadLocalStorage::GetTLSValue(key),
                                   entry);
}
+
+// This function is called to initialize our entire Chromium TLS system.
+// It may be called very early, and we need to complete most all of the setup
+// (initialization) before calling *any* memory allocator functions, which may
+// recursively depend on this initialization.
+// As a result, we use Atomics, and avoid anything (like a singleton) that might
+// require memory allocations.
+TlsVectorEntry* ConstructTlsVector() {
+ PlatformThreadLocalStorage::TLSKey key =
+ base::subtle::NoBarrier_Load(&g_native_tls_key);
+ if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
+ CHECK(PlatformThreadLocalStorage::AllocTLS(&key));
+
+    // TLS_KEY_OUT_OF_INDEXES is used to detect whether the key has been set
+    // in NoBarrier_CompareAndSwap. POSIX has no invalid key, so we designate
+    // an almost-impossible value as the sentinel.
+    // If we really do get TLS_KEY_OUT_OF_INDEXES as the value of the key,
+    // just allocate another TLS slot.
+ if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
+ PlatformThreadLocalStorage::TLSKey tmp = key;
+ CHECK(PlatformThreadLocalStorage::AllocTLS(&key) &&
+ key != PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES);
+ PlatformThreadLocalStorage::FreeTLS(tmp);
+ }
+ // Atomically test-and-set the tls_key. If the key is
+ // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as
+ // another thread already did our dirty work.
+ if (PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES !=
+ static_cast<PlatformThreadLocalStorage::TLSKey>(
+ base::subtle::NoBarrier_CompareAndSwap(
+ &g_native_tls_key,
+ PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES, key))) {
+ // We've been shortcut. Another thread replaced g_native_tls_key first so
+ // we need to destroy our index and use the one the other thread got
+ // first.
+ PlatformThreadLocalStorage::FreeTLS(key);
+ key = base::subtle::NoBarrier_Load(&g_native_tls_key);
+ }
+ }
+ CHECK_EQ(GetTlsVectorStateAndValue(key), TlsVectorState::kUninitialized);
+
+ // Some allocators, such as TCMalloc, make use of thread local storage. As a
+ // result, any attempt to call new (or malloc) will lazily cause such a system
+ // to initialize, which will include registering for a TLS key. If we are not
+ // careful here, then that request to create a key will call new back, and
+ // we'll have an infinite loop. We avoid that as follows: Use a stack
+ // allocated vector, so that we don't have dependence on our allocator until
+ // our service is in place. (i.e., don't even call new until after we're
+ // setup)
+ TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
+ memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data));
+  // Ensure that any re-entrant calls change the temp version.
+ SetTlsVectorValue(key, stack_allocated_tls_data, TlsVectorState::kInUse);
+
+ // Allocate an array to store our data.
+ TlsVectorEntry* tls_data = new TlsVectorEntry[kThreadLocalStorageSize];
+ memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data));
+ SetTlsVectorValue(key, tls_data, TlsVectorState::kInUse);
+ return tls_data;
+}
+
+void OnThreadExitInternal(TlsVectorEntry* tls_data) {
+ DCHECK(tls_data);
+ // Some allocators, such as TCMalloc, use TLS. As a result, when a thread
+ // terminates, one of the destructor calls we make may be to shut down an
+ // allocator. We have to be careful that after we've shutdown all of the known
+ // destructors (perchance including an allocator), that we don't call the
+  // allocator and cause it to resurrect itself (with no possible destructor
+ // call to follow). We handle this problem as follows: Switch to using a stack
+ // allocated vector, so that we don't have dependence on our allocator after
+ // we have called all g_tls_metadata destructors. (i.e., don't even call
+ // delete[] after we're done with destructors.)
+ TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
+ memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data));
+ // Ensure that any re-entrant calls change the temp version.
+ PlatformThreadLocalStorage::TLSKey key =
+ base::subtle::NoBarrier_Load(&g_native_tls_key);
+ SetTlsVectorValue(key, stack_allocated_tls_data, TlsVectorState::kDestroying);
+ delete[] tls_data; // Our last dependence on an allocator.
+
+ // Snapshot the TLS Metadata so we don't have to lock on every access.
+ TlsMetadata tls_metadata[kThreadLocalStorageSize];
+ {
+ base::AutoLock auto_lock(*GetTLSMetadataLock());
+ memcpy(tls_metadata, g_tls_metadata, sizeof(g_tls_metadata));
+ }
+
+ int remaining_attempts = kMaxDestructorIterations;
+ bool need_to_scan_destructors = true;
+ while (need_to_scan_destructors) {
+ need_to_scan_destructors = false;
+ // Try to destroy the first-created-slot (which is slot 1) in our last
+ // destructor call. That user was able to function, and define a slot with
+ // no other services running, so perhaps it is a basic service (like an
+ // allocator) and should also be destroyed last. If we get the order wrong,
+ // then we'll iterate several more times, so it is really not that critical
+ // (but it might help).
+ for (int slot = 0; slot < kThreadLocalStorageSize; ++slot) {
+ void* tls_value = stack_allocated_tls_data[slot].data;
+ if (!tls_value || tls_metadata[slot].status == TlsStatus::FREE ||
+ stack_allocated_tls_data[slot].version != tls_metadata[slot].version)
+ continue;
+
+ base::ThreadLocalStorage::TLSDestructorFunc destructor =
+ tls_metadata[slot].destructor;
+ if (!destructor)
+ continue;
+ stack_allocated_tls_data[slot].data = nullptr; // pre-clear the slot.
+ destructor(tls_value);
+ // Any destructor might have called a different service, which then set a
+ // different slot to a non-null value. Hence we need to check the whole
+ // vector again. This is a pthread standard.
+ need_to_scan_destructors = true;
+ }
+ if (--remaining_attempts <= 0) {
+ NOTREACHED(); // Destructors might not have been called.
+ break;
+ }
+ }
+
+ // Remove our stack allocated vector.
+ SetTlsVectorValue(key, nullptr, TlsVectorState::kDestroyed);
+}
+
+} // namespace
+
+namespace base {
+
+namespace internal {
+
+#if defined(OS_WIN)
+void PlatformThreadLocalStorage::OnThreadExit() {
+ PlatformThreadLocalStorage::TLSKey key =
+ base::subtle::NoBarrier_Load(&g_native_tls_key);
+ if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES)
+ return;
+ TlsVectorEntry* tls_vector = nullptr;
+ const TlsVectorState state = GetTlsVectorStateAndValue(key, &tls_vector);
+
+ // On Windows, thread destruction callbacks are only invoked once per module,
+ // so there should be no way that this could be invoked twice.
+ DCHECK_NE(state, TlsVectorState::kDestroyed);
+
+ // Maybe we have never initialized TLS for this thread.
+ if (state == TlsVectorState::kUninitialized)
+ return;
+ OnThreadExitInternal(tls_vector);
+}
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+void PlatformThreadLocalStorage::OnThreadExit(void* value) {
+ // On posix this function may be called twice. The first pass calls dtors and
+ // sets state to kDestroyed. The second pass sets kDestroyed to
+ // kUninitialized.
+ TlsVectorEntry* tls_vector = nullptr;
+ const TlsVectorState state = GetTlsVectorStateAndValue(value, &tls_vector);
+ if (state == TlsVectorState::kDestroyed) {
+ PlatformThreadLocalStorage::TLSKey key =
+ base::subtle::NoBarrier_Load(&g_native_tls_key);
+ SetTlsVectorValue(key, nullptr, TlsVectorState::kUninitialized);
+ return;
+ }
+
+ OnThreadExitInternal(tls_vector);
+}
+#endif // defined(OS_WIN)
+
+} // namespace internal
+
+// static
+bool ThreadLocalStorage::HasBeenDestroyed() {
+ PlatformThreadLocalStorage::TLSKey key =
+ base::subtle::NoBarrier_Load(&g_native_tls_key);
+ if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES)
+ return false;
+ const TlsVectorState state = GetTlsVectorStateAndValue(key);
+ return state == TlsVectorState::kDestroying ||
+ state == TlsVectorState::kDestroyed;
+}
+
+void ThreadLocalStorage::Slot::Initialize(TLSDestructorFunc destructor) {
+ PlatformThreadLocalStorage::TLSKey key =
+ base::subtle::NoBarrier_Load(&g_native_tls_key);
+ if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES ||
+ GetTlsVectorStateAndValue(key) == TlsVectorState::kUninitialized) {
+ ConstructTlsVector();
+ }
+
+ // Grab a new slot.
+ {
+ base::AutoLock auto_lock(*GetTLSMetadataLock());
+ for (int i = 0; i < kThreadLocalStorageSize; ++i) {
+ // Tracking the last assigned slot is an attempt to find the next
+ // available slot within one iteration. Under normal usage, slots remain
+ // in use for the lifetime of the process (otherwise before we reclaimed
+ // slots, we would have run out of slots). This makes it highly likely the
+ // next slot is going to be a free slot.
+ size_t slot_candidate =
+ (g_last_assigned_slot + 1 + i) % kThreadLocalStorageSize;
+ if (g_tls_metadata[slot_candidate].status == TlsStatus::FREE) {
+ g_tls_metadata[slot_candidate].status = TlsStatus::IN_USE;
+ g_tls_metadata[slot_candidate].destructor = destructor;
+ g_last_assigned_slot = slot_candidate;
+ DCHECK_EQ(kInvalidSlotValue, slot_);
+ slot_ = slot_candidate;
+ version_ = g_tls_metadata[slot_candidate].version;
+ break;
+ }
+ }
+ }
+ CHECK_NE(slot_, kInvalidSlotValue);
+ CHECK_LT(slot_, kThreadLocalStorageSize);
+}
+
+void ThreadLocalStorage::Slot::Free() {
+ DCHECK_NE(slot_, kInvalidSlotValue);
+ DCHECK_LT(slot_, kThreadLocalStorageSize);
+ {
+ base::AutoLock auto_lock(*GetTLSMetadataLock());
+ g_tls_metadata[slot_].status = TlsStatus::FREE;
+ g_tls_metadata[slot_].destructor = nullptr;
+ ++(g_tls_metadata[slot_].version);
+ }
+ slot_ = kInvalidSlotValue;
+}
+
+void* ThreadLocalStorage::Slot::Get() const {
+ TlsVectorEntry* tls_data = nullptr;
+ const TlsVectorState state = GetTlsVectorStateAndValue(
+ base::subtle::NoBarrier_Load(&g_native_tls_key), &tls_data);
+ DCHECK_NE(state, TlsVectorState::kDestroyed);
+ if (!tls_data)
+ return nullptr;
+ DCHECK_NE(slot_, kInvalidSlotValue);
+ DCHECK_LT(slot_, kThreadLocalStorageSize);
+ // Version mismatches means this slot was previously freed.
+ if (tls_data[slot_].version != version_)
+ return nullptr;
+ return tls_data[slot_].data;
+}
+
+void ThreadLocalStorage::Slot::Set(void* value) {
+ TlsVectorEntry* tls_data = nullptr;
+ const TlsVectorState state = GetTlsVectorStateAndValue(
+ base::subtle::NoBarrier_Load(&g_native_tls_key), &tls_data);
+ DCHECK_NE(state, TlsVectorState::kDestroyed);
+ if (!tls_data) {
+ if (!value)
+ return;
+ tls_data = ConstructTlsVector();
+ }
+ DCHECK_NE(slot_, kInvalidSlotValue);
+ DCHECK_LT(slot_, kThreadLocalStorageSize);
+ tls_data[slot_].data = value;
+ tls_data[slot_].version = version_;
+}
+
+ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
+ Initialize(destructor);
+}
+
+ThreadLocalStorage::Slot::~Slot() {
+ Free();
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/threading/thread_local_storage.h b/security/sandbox/chromium/base/threading/thread_local_storage.h
new file mode 100644
index 0000000000..73b845ef56
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_local_storage.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_LOCAL_STORAGE_H_
+#define BASE_THREADING_THREAD_LOCAL_STORAGE_H_
+
+#include <stdint.h>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <pthread.h>
+#endif
+
+namespace ui {
+class TLSDestructionCheckerForX11;
+}
+
+namespace base {
+
+class SamplingHeapProfiler;
+
+namespace debug {
+class GlobalActivityTracker;
+} // namespace debug
+
+namespace trace_event {
+class MallocDumpProvider;
+} // namespace trace_event
+
+namespace internal {
+
+class ThreadLocalStorageTestInternal;
+
+// WARNING: You should *NOT* use this class directly.
+// PlatformThreadLocalStorage is a low-level abstraction of the OS's TLS
+// interface. Instead, you should use one of the following:
+// * ThreadLocalBoolean (from thread_local.h) for booleans.
+// * ThreadLocalPointer (from thread_local.h) for pointers.
+// * ThreadLocalStorage::StaticSlot/Slot for more direct control of the slot.
+class BASE_EXPORT PlatformThreadLocalStorage {
+ public:
+
+#if defined(OS_WIN)
+ typedef unsigned long TLSKey;
+ enum : unsigned { TLS_KEY_OUT_OF_INDEXES = TLS_OUT_OF_INDEXES };
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ typedef pthread_key_t TLSKey;
+ // The following is a "reserved key" which is used in our generic Chromium
+ // ThreadLocalStorage implementation. We expect that an OS will not return
+ // such a key, but if it is returned (i.e., the OS tries to allocate it) we
+ // will just request another key.
+ enum { TLS_KEY_OUT_OF_INDEXES = 0x7FFFFFFF };
+#endif
+
+ // The following methods need to be supported on each OS platform, so that
+ // the Chromium ThreadLocalStore functionality can be constructed.
+ // Chromium will use these methods to acquire a single OS slot, and then use
+ // that to support a much larger number of Chromium slots (independent of the
+ // OS restrictions).
+ // The following returns true if it successfully is able to return an OS
+ // key in |key|.
+ static bool AllocTLS(TLSKey* key);
+ // Note: FreeTLS() doesn't have to be called, it is fine with this leak, OS
+ // might not reuse released slot, you might just reset the TLS value with
+ // SetTLSValue().
+ static void FreeTLS(TLSKey key);
+ static void SetTLSValue(TLSKey key, void* value);
+ static void* GetTLSValue(TLSKey key) {
+#if defined(OS_WIN)
+ return TlsGetValue(key);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ return pthread_getspecific(key);
+#endif
+ }
+
+ // Each platform (OS implementation) is required to call this method on each
+ // terminating thread when the thread is about to terminate. This method
+ // will then call all registered destructors for slots in Chromium
+ // ThreadLocalStorage, until there are no slot values remaining as having
+ // been set on this thread.
+ // Destructors may end up being called multiple times on a terminating
+ // thread, as other destructors may re-set slots that were previously
+ // destroyed.
+#if defined(OS_WIN)
+  // Since Windows doesn't support TLS destructors, the implementation
+  // should use GetTLSValue() to retrieve the value of the TLS slot.
+ static void OnThreadExit();
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // |value| is the data stored in the TLS slot. The implementation can't use
+  // GetTLSValue() to retrieve the value of the slot because POSIX has already
+  // reset it by the time this is called.
+ static void OnThreadExit(void* value);
+#endif
+};
+
+} // namespace internal
+
+// Wrapper for thread local storage. This class doesn't do much except provide
+// an API for portability.
+class BASE_EXPORT ThreadLocalStorage {
+ public:
+ // Prototype for the TLS destructor function, which can be optionally used to
+ // cleanup thread local storage on thread exit. 'value' is the data that is
+ // stored in thread local storage.
+ typedef void (*TLSDestructorFunc)(void* value);
+
+ // A key representing one value stored in TLS. Use as a class member or a
+ // local variable. If you need a static storage duration variable, use the
+ // following pattern with a NoDestructor<Slot>:
+ // void MyDestructorFunc(void* value);
+ // ThreadLocalStorage::Slot& ImportantContentTLS() {
+ // static NoDestructor<ThreadLocalStorage::Slot> important_content_tls(
+ // &MyDestructorFunc);
+ // return *important_content_tls;
+ // }
+ class BASE_EXPORT Slot final {
+ public:
+ // |destructor| is a pointer to a function to perform per-thread cleanup of
+ // this object. If set to nullptr, no cleanup is done for this TLS slot.
+ explicit Slot(TLSDestructorFunc destructor = nullptr);
+ // If a destructor was set for this slot, removes the destructor so that
+ // remaining threads exiting will not free data.
+ ~Slot();
+
+ // Get the thread-local value stored in slot 'slot'.
+ // Values are guaranteed to initially be zero.
+ void* Get() const;
+
+ // Set the thread-local value stored in slot 'slot' to
+ // value 'value'.
+ void Set(void* value);
+
+ private:
+ void Initialize(TLSDestructorFunc destructor);
+ void Free();
+
+ static constexpr int kInvalidSlotValue = -1;
+ int slot_ = kInvalidSlotValue;
+ uint32_t version_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(Slot);
+ };
+
+ private:
+ // In most cases, most callers should not need access to HasBeenDestroyed().
+ // If you are working in code that runs during thread destruction, contact the
+ // base OWNERs for advice and then make a friend request.
+ //
+ // Returns |true| if Chrome's implementation of TLS is being or has been
+ // destroyed during thread destruction. Attempting to call Slot::Get() during
+ // destruction is disallowed and will hit a DCHECK. Any code that relies on
+ // TLS during thread destruction must first check this method before calling
+ // Slot::Get().
+ friend class SequenceCheckerImpl;
+ friend class SamplingHeapProfiler;
+ friend class ThreadCheckerImpl;
+ friend class internal::ThreadLocalStorageTestInternal;
+ friend class trace_event::MallocDumpProvider;
+ friend class debug::GlobalActivityTracker;
+ friend class ui::TLSDestructionCheckerForX11;
+ static bool HasBeenDestroyed();
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalStorage);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_LOCAL_STORAGE_H_
diff --git a/security/sandbox/chromium/base/threading/thread_local_storage_posix.cc b/security/sandbox/chromium/base/threading/thread_local_storage_posix.cc
new file mode 100644
index 0000000000..89edeee1d2
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_local_storage_posix.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_local_storage.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+namespace internal {
+
+bool PlatformThreadLocalStorage::AllocTLS(TLSKey* key) {
+ return !pthread_key_create(key,
+ base::internal::PlatformThreadLocalStorage::OnThreadExit);
+}
+
+void PlatformThreadLocalStorage::FreeTLS(TLSKey key) {
+ int ret = pthread_key_delete(key);
+ DCHECK_EQ(ret, 0);
+}
+
+void PlatformThreadLocalStorage::SetTLSValue(TLSKey key, void* value) {
+ int ret = pthread_setspecific(key, value);
+ DCHECK_EQ(ret, 0);
+}
+
+} // namespace internal
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/threading/thread_local_storage_win.cc b/security/sandbox/chromium/base/threading/thread_local_storage_win.cc
new file mode 100644
index 0000000000..a9aec31da5
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_local_storage_win.cc
@@ -0,0 +1,107 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_local_storage.h"
+
+#include <windows.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+namespace internal {
+
+bool PlatformThreadLocalStorage::AllocTLS(TLSKey* key) {
+ TLSKey value = TlsAlloc();
+ if (value != TLS_OUT_OF_INDEXES) {
+ *key = value;
+ return true;
+ }
+ return false;
+}
+
+void PlatformThreadLocalStorage::FreeTLS(TLSKey key) {
+ BOOL ret = TlsFree(key);
+ DCHECK(ret);
+}
+
+void PlatformThreadLocalStorage::SetTLSValue(TLSKey key, void* value) {
+ BOOL ret = TlsSetValue(key, value);
+ DCHECK(ret);
+}
+
+} // namespace internal
+
+} // namespace base
+
+// Thread Termination Callbacks.
+// Windows doesn't support a per-thread destructor with its
+// TLS primitives. So, we build it manually by inserting a
+// function to be called on each thread's exit.
+// This magic is from http://www.codeproject.com/threads/tls.asp
+// and it works for VC++ 7.0 and later.
+
+// Force a reference to _tls_used to make the linker create the TLS directory
+// if it's not already there. (e.g. if __declspec(thread) is not used).
+// Force a reference to p_thread_callback_base to prevent whole program
+// optimization from discarding the variable.
+#ifdef _WIN64
+
+#pragma comment(linker, "/INCLUDE:_tls_used")
+#pragma comment(linker, "/INCLUDE:p_thread_callback_base")
+
+#else // _WIN64
+
+#pragma comment(linker, "/INCLUDE:__tls_used")
+#pragma comment(linker, "/INCLUDE:_p_thread_callback_base")
+
+#endif // _WIN64
+
+// Static callback function to call with each thread termination.
+void NTAPI OnThreadExit(PVOID module, DWORD reason, PVOID reserved) {
+ // On XP SP0 & SP1, the DLL_PROCESS_ATTACH is never seen. It is sent on SP2+
+ // and on W2K and W2K3. So don't assume it is sent.
+ if (DLL_THREAD_DETACH == reason || DLL_PROCESS_DETACH == reason)
+ base::internal::PlatformThreadLocalStorage::OnThreadExit();
+}
+
+// .CRT$XLA to .CRT$XLZ is an array of PIMAGE_TLS_CALLBACK pointers that are
+// called automatically by the OS loader code (not the CRT) when the module is
+// loaded and on thread creation. They are NOT called if the module has been
+// loaded by a LoadLibrary() call. It must have implicitly been loaded at
+// process startup.
+// By implicitly loaded, I mean that it is directly referenced by the main EXE
+// or by one of its dependent DLLs. Delay-loaded DLL doesn't count as being
+// implicitly loaded.
+//
+// See VC\crt\src\tlssup.c for reference.
+
+// extern "C" suppresses C++ name mangling so we know the symbol name for the
+// linker /INCLUDE:symbol pragma above.
+extern "C" {
+// The linker must not discard p_thread_callback_base. (We force a reference
+// to this variable with a linker /INCLUDE:symbol pragma to ensure that.) If
+// this variable is discarded, the OnThreadExit function will never be called.
+#ifdef _WIN64
+
+// .CRT section is merged with .rdata on x64 so it must be constant data.
+#pragma const_seg(".CRT$XLB")
+// When defining a const variable, it must have external linkage to be sure the
+// linker doesn't discard it.
+extern const PIMAGE_TLS_CALLBACK p_thread_callback_base;
+const PIMAGE_TLS_CALLBACK p_thread_callback_base = OnThreadExit;
+
+// Reset the default section.
+#pragma const_seg()
+
+#else // _WIN64
+
+#pragma data_seg(".CRT$XLB")
+PIMAGE_TLS_CALLBACK p_thread_callback_base = OnThreadExit;
+
+// Reset the default section.
+#pragma data_seg()
+
+#endif // _WIN64
+} // extern "C"
diff --git a/security/sandbox/chromium/base/threading/thread_restrictions.cc b/security/sandbox/chromium/base/threading/thread_restrictions.cc
new file mode 100644
index 0000000000..75c37eab4f
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_restrictions.cc
@@ -0,0 +1,258 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_restrictions.h"
+
+#if DCHECK_IS_ON()
+
+#include "base/debug/stack_trace.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/thread_local.h"
+#include "build/build_config.h"
+
+namespace base {
+
+std::ostream& operator<<(std::ostream&out, const ThreadLocalBoolean& tl) {
+ out << "currently set to " << (tl.Get() ? "true" : "false");
+ return out;
+}
+
+namespace {
+
+#if defined(OS_NACL) || defined(OS_ANDROID)
+// NaCL doesn't support stack sampling and Android is slow at stack
+// sampling and this causes timeouts (crbug.com/959139).
+using ThreadLocalBooleanWithStacks = ThreadLocalBoolean;
+#else
+class ThreadLocalBooleanWithStacks {
+ public:
+ ThreadLocalBooleanWithStacks() = default;
+
+ bool Get() const { return bool_.Get(); }
+
+ void Set(bool val) {
+ stack_.Set(std::make_unique<debug::StackTrace>());
+ bool_.Set(val);
+ }
+
+ friend std::ostream& operator<<(std::ostream& out,
+ const ThreadLocalBooleanWithStacks& tl) {
+ out << tl.bool_ << " by ";
+
+ if (!tl.stack_.Get())
+ return out << "default value\n";
+ out << "\n";
+ tl.stack_.Get()->OutputToStream(&out);
+ return out;
+ }
+
+ private:
+ ThreadLocalBoolean bool_;
+ ThreadLocalOwnedPointer<debug::StackTrace> stack_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalBooleanWithStacks);
+};
+#endif  // defined(OS_NACL) || defined(OS_ANDROID)
+
+LazyInstance<ThreadLocalBooleanWithStacks>::Leaky g_blocking_disallowed =
+ LAZY_INSTANCE_INITIALIZER;
+
+LazyInstance<ThreadLocalBooleanWithStacks>::Leaky g_singleton_disallowed =
+ LAZY_INSTANCE_INITIALIZER;
+
+LazyInstance<ThreadLocalBooleanWithStacks>::Leaky
+ g_base_sync_primitives_disallowed = LAZY_INSTANCE_INITIALIZER;
+
+LazyInstance<ThreadLocalBooleanWithStacks>::Leaky
+ g_cpu_intensive_work_disallowed = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+namespace internal {
+
+void AssertBlockingAllowed() {
+ DCHECK(!g_blocking_disallowed.Get().Get())
+ << "Function marked as blocking was called from a scope that disallows "
+ "blocking! If this task is running inside the ThreadPool, it needs "
+ "to have MayBlock() in its TaskTraits. Otherwise, consider making "
+ "this blocking work asynchronous or, as a last resort, you may use "
+ "ScopedAllowBlocking (see its documentation for best practices).\n"
+ << "g_blocking_disallowed " << g_blocking_disallowed.Get();
+}
+
+} // namespace internal
+
+void DisallowBlocking() {
+ g_blocking_disallowed.Get().Set(true);
+}
+
+ScopedDisallowBlocking::ScopedDisallowBlocking()
+ : was_disallowed_(g_blocking_disallowed.Get().Get()) {
+ g_blocking_disallowed.Get().Set(true);
+}
+
+ScopedDisallowBlocking::~ScopedDisallowBlocking() {
+ DCHECK(g_blocking_disallowed.Get().Get());
+ g_blocking_disallowed.Get().Set(was_disallowed_);
+}
+
+ScopedAllowBlocking::ScopedAllowBlocking()
+ : was_disallowed_(g_blocking_disallowed.Get().Get()) {
+ g_blocking_disallowed.Get().Set(false);
+}
+
+ScopedAllowBlocking::~ScopedAllowBlocking() {
+ DCHECK(!g_blocking_disallowed.Get().Get());
+ g_blocking_disallowed.Get().Set(was_disallowed_);
+}
+
+void DisallowBaseSyncPrimitives() {
+ g_base_sync_primitives_disallowed.Get().Set(true);
+}
+
+ScopedAllowBaseSyncPrimitives::ScopedAllowBaseSyncPrimitives()
+ : was_disallowed_(g_base_sync_primitives_disallowed.Get().Get()) {
+ DCHECK(!g_blocking_disallowed.Get().Get())
+ << "To allow //base sync primitives in a scope where blocking is "
+ "disallowed use ScopedAllowBaseSyncPrimitivesOutsideBlockingScope.\n"
+ << "g_blocking_disallowed " << g_blocking_disallowed.Get();
+ g_base_sync_primitives_disallowed.Get().Set(false);
+}
+
+ScopedAllowBaseSyncPrimitives::~ScopedAllowBaseSyncPrimitives() {
+ DCHECK(!g_base_sync_primitives_disallowed.Get().Get());
+ g_base_sync_primitives_disallowed.Get().Set(was_disallowed_);
+}
+
+ScopedAllowBaseSyncPrimitivesOutsideBlockingScope::
+ ScopedAllowBaseSyncPrimitivesOutsideBlockingScope()
+ : was_disallowed_(g_base_sync_primitives_disallowed.Get().Get()) {
+ g_base_sync_primitives_disallowed.Get().Set(false);
+}
+
+ScopedAllowBaseSyncPrimitivesOutsideBlockingScope::
+ ~ScopedAllowBaseSyncPrimitivesOutsideBlockingScope() {
+ DCHECK(!g_base_sync_primitives_disallowed.Get().Get());
+ g_base_sync_primitives_disallowed.Get().Set(was_disallowed_);
+}
+
+ScopedAllowBaseSyncPrimitivesForTesting::
+ ScopedAllowBaseSyncPrimitivesForTesting()
+ : was_disallowed_(g_base_sync_primitives_disallowed.Get().Get()) {
+ g_base_sync_primitives_disallowed.Get().Set(false);
+}
+
+ScopedAllowBaseSyncPrimitivesForTesting::
+ ~ScopedAllowBaseSyncPrimitivesForTesting() {
+ DCHECK(!g_base_sync_primitives_disallowed.Get().Get());
+ g_base_sync_primitives_disallowed.Get().Set(was_disallowed_);
+}
+
+ScopedAllowUnresponsiveTasksForTesting::ScopedAllowUnresponsiveTasksForTesting()
+ : was_disallowed_base_sync_(g_base_sync_primitives_disallowed.Get().Get()),
+ was_disallowed_blocking_(g_blocking_disallowed.Get().Get()),
+ was_disallowed_cpu_(g_cpu_intensive_work_disallowed.Get().Get()) {
+ g_base_sync_primitives_disallowed.Get().Set(false);
+ g_blocking_disallowed.Get().Set(false);
+ g_cpu_intensive_work_disallowed.Get().Set(false);
+}
+
+ScopedAllowUnresponsiveTasksForTesting::
+ ~ScopedAllowUnresponsiveTasksForTesting() {
+ DCHECK(!g_base_sync_primitives_disallowed.Get().Get());
+ DCHECK(!g_blocking_disallowed.Get().Get());
+ DCHECK(!g_cpu_intensive_work_disallowed.Get().Get());
+ g_base_sync_primitives_disallowed.Get().Set(was_disallowed_base_sync_);
+ g_blocking_disallowed.Get().Set(was_disallowed_blocking_);
+ g_cpu_intensive_work_disallowed.Get().Set(was_disallowed_cpu_);
+}
+
+namespace internal {
+
+void AssertBaseSyncPrimitivesAllowed() {
+ DCHECK(!g_base_sync_primitives_disallowed.Get().Get())
+ << "Waiting on a //base sync primitive is not allowed on this thread to "
+ "prevent jank and deadlock. If waiting on a //base sync primitive is "
+ "unavoidable, do it within the scope of a "
+ "ScopedAllowBaseSyncPrimitives. If in a test, "
+ "use ScopedAllowBaseSyncPrimitivesForTesting.\n"
+ << "g_base_sync_primitives_disallowed "
+ << g_base_sync_primitives_disallowed.Get()
+ << "It can be useful to know that g_blocking_disallowed is "
+ << g_blocking_disallowed.Get();
+}
+
+void ResetThreadRestrictionsForTesting() {
+ g_blocking_disallowed.Get().Set(false);
+ g_singleton_disallowed.Get().Set(false);
+ g_base_sync_primitives_disallowed.Get().Set(false);
+ g_cpu_intensive_work_disallowed.Get().Set(false);
+}
+
+} // namespace internal
+
+void AssertLongCPUWorkAllowed() {
+ DCHECK(!g_cpu_intensive_work_disallowed.Get().Get())
+ << "Function marked as CPU intensive was called from a scope that "
+ "disallows this kind of work! Consider making this work "
+ "asynchronous.\n"
+ << "g_cpu_intensive_work_disallowed "
+ << g_cpu_intensive_work_disallowed.Get();
+}
+
+void DisallowUnresponsiveTasks() {
+ DisallowBlocking();
+ DisallowBaseSyncPrimitives();
+ g_cpu_intensive_work_disallowed.Get().Set(true);
+}
+
+ThreadRestrictions::ScopedAllowIO::ScopedAllowIO()
+ : was_allowed_(SetIOAllowed(true)) {}
+
+ThreadRestrictions::ScopedAllowIO::~ScopedAllowIO() {
+ SetIOAllowed(was_allowed_);
+}
+
+// static
+bool ThreadRestrictions::SetIOAllowed(bool allowed) {
+ bool previous_disallowed = g_blocking_disallowed.Get().Get();
+ g_blocking_disallowed.Get().Set(!allowed);
+ return !previous_disallowed;
+}
+
+// static
+bool ThreadRestrictions::SetSingletonAllowed(bool allowed) {
+ bool previous_disallowed = g_singleton_disallowed.Get().Get();
+ g_singleton_disallowed.Get().Set(!allowed);
+ return !previous_disallowed;
+}
+
+// static
+void ThreadRestrictions::AssertSingletonAllowed() {
+ DCHECK(!g_singleton_disallowed.Get().Get())
+ << "LazyInstance/Singleton is not allowed to be used on this thread. "
+ "Most likely it's because this thread is not joinable (or the current "
+ "task is running with TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN "
+ "semantics), so AtExitManager may have deleted the object on "
+ "shutdown, leading to a potential shutdown crash. If you need to use "
+ "the object from this context, it'll have to be updated to use Leaky "
+ "traits.\n"
+ << "g_singleton_disallowed " << g_singleton_disallowed.Get();
+}
+
+// static
+void ThreadRestrictions::DisallowWaiting() {
+ DisallowBaseSyncPrimitives();
+}
+
+bool ThreadRestrictions::SetWaitAllowed(bool allowed) {
+ bool previous_disallowed = g_base_sync_primitives_disallowed.Get().Get();
+ g_base_sync_primitives_disallowed.Get().Set(!allowed);
+ return !previous_disallowed;
+}
+
+} // namespace base
+
+#endif // DCHECK_IS_ON()
diff --git a/security/sandbox/chromium/base/threading/thread_restrictions.h b/security/sandbox/chromium/base/threading/thread_restrictions.h
new file mode 100644
index 0000000000..55047c5b40
--- /dev/null
+++ b/security/sandbox/chromium/base/threading/thread_restrictions.h
@@ -0,0 +1,680 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_RESTRICTIONS_H_
+#define BASE_THREADING_THREAD_RESTRICTIONS_H_
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+// -----------------------------------------------------------------------------
+// Usage documentation
+// -----------------------------------------------------------------------------
+//
+// Overview:
+// This file exposes functions to ban and allow certain slow operations
+// on a per-thread basis. To annotate *usage* of such slow operations, refer to
+// scoped_blocking_call.h instead.
+//
+// Specific allowances that can be controlled in this file are:
+// - Blocking call: Refers to any call that causes the calling thread to wait
+// off-CPU. It includes but is not limited to calls that wait on synchronous
+// file I/O operations: read or write a file from disk, interact with a pipe
+// or a socket, rename or delete a file, enumerate files in a directory, etc.
+// Acquiring a low contention lock is not considered a blocking call.
+//
+// - Waiting on a //base sync primitive: Refers to calling one of these methods:
+// - base::WaitableEvent::*Wait*
+// - base::ConditionVariable::*Wait*
+// - base::Process::WaitForExit*
+//
+// - Long CPU work: Refers to any code that takes more than 100 ms to
+// run when there is no CPU contention and no hard page faults and therefore,
+// is not suitable to run on a thread required to keep the browser responsive
+// (where jank could be visible to the user).
+//
+// The following disallowance functions are offered:
+// - DisallowBlocking(): Disallows blocking calls on the current thread.
+// - DisallowBaseSyncPrimitives(): Disallows waiting on a //base sync primitive
+// on the current thread.
+// - DisallowUnresponsiveTasks(): Disallows blocking calls, waiting on a //base
+// sync primitive, and long cpu work on the current thread.
+//
+// In addition, scoped-allowance mechanisms are offered to make an exception
+// within a scope for a behavior that is normally disallowed.
+// - ScopedAllowBlocking(ForTesting): Allows blocking calls.
+// - ScopedAllowBaseSyncPrimitives(ForTesting)(OutsideBlockingScope): Allow
+// waiting on a //base sync primitive. The OutsideBlockingScope suffix allows
+// uses in a scope where blocking is also disallowed.
+//
+// Avoid using allowances outside of unit tests. In unit tests, use allowances
+// with the suffix "ForTesting".
+//
+// Prefer making blocking calls from tasks posted to base::ThreadPoolInstance
+// with base::MayBlock().
+//
+// Instead of waiting on a WaitableEvent or a ConditionVariable, prefer putting
+// the work that should happen after the wait in a continuation callback and
+// post it from where the WaitableEvent or ConditionVariable would have been
+// signaled. If something needs to be scheduled after many tasks have executed,
+// use base::BarrierClosure.
+//
+// On Windows, join processes asynchronously using base::win::ObjectWatcher.
+//
+// Where unavoidable, put ScopedAllow* instances in the narrowest scope possible
+// in the caller making the blocking call but no further down. For example: if a
+// Cleanup() method needs to do a blocking call, document Cleanup() as blocking
+// and add a ScopedAllowBlocking instance in callers that can't avoid making
+// this call from a context where blocking is banned, as such:
+//
+// void Client::MyMethod() {
+// (...)
+// {
+// // Blocking is okay here because XYZ.
+// ScopedAllowBlocking allow_blocking;
+// my_foo_->Cleanup();
+// }
+// (...)
+// }
+//
+// // This method can block.
+// void Foo::Cleanup() {
+// // Do NOT add the ScopedAllowBlocking in Cleanup() directly as that hides
+// // its blocking nature from unknowing callers and defeats the purpose of
+// // these checks.
+// FlushStateToDisk();
+// }
+//
+// Note: In rare situations where the blocking call is an implementation detail
+// (i.e. the impl makes a call that invokes AssertBlockingAllowed() but it
+// somehow knows that in practice this will not block), it might be okay to hide
+// the ScopedAllowBlocking instance in the impl with a comment explaining why
+// that's okay.
+
+class BrowserProcessImpl;
+class HistogramSynchronizer;
+class KeyStorageLinux;
+class NativeBackendKWallet;
+class NativeDesktopMediaList;
+class StartupTimeBomb;
+
+namespace android_webview {
+class AwFormDatabaseService;
+class CookieManager;
+class ScopedAllowInitGLBindings;
+class VizCompositorThreadRunnerWebView;
+}
+namespace audio {
+class OutputDevice;
+}
+namespace blink {
+class RTCVideoDecoderAdapter;
+class RTCVideoEncoder;
+class SourceStream;
+class VideoFrameResourceProvider;
+class WorkerThread;
+namespace scheduler {
+class WorkerThread;
+}
+}
+namespace cc {
+class CompletionEvent;
+class TileTaskManagerImpl;
+}
+namespace chromeos {
+class BlockingMethodCaller;
+namespace system {
+class StatisticsProviderImpl;
+}
+}
+namespace chrome_browser_net {
+class Predictor;
+}
+namespace chrome_cleaner {
+class SystemReportComponent;
+}
+namespace content {
+class BrowserGpuChannelHostFactory;
+class BrowserMainLoop;
+class BrowserProcessSubThread;
+class BrowserShutdownProfileDumper;
+class BrowserTestBase;
+class CategorizedWorkerPool;
+class DesktopCaptureDevice;
+class InProcessUtilityThread;
+class NestedMessagePumpAndroid;
+class RenderProcessHostImpl;
+class RenderWidgetHostViewMac;
+class RTCVideoDecoder;
+class SandboxHostLinux;
+class ScopedAllowWaitForDebugURL;
+class ServiceWorkerContextClient;
+class SoftwareOutputDeviceMus;
+class SynchronousCompositor;
+class SynchronousCompositorHost;
+class SynchronousCompositorSyncCallBridge;
+class TextInputClientMac;
+class WebContentsViewMac;
+} // namespace content
+namespace cronet {
+class CronetPrefsManager;
+class CronetURLRequestContext;
+} // namespace cronet
+namespace dbus {
+class Bus;
+}
+namespace disk_cache {
+class BackendImpl;
+class InFlightIO;
+}
+namespace functions {
+class ExecScriptScopedAllowBaseSyncPrimitives;
+}
+namespace history_report {
+class HistoryReportJniBridge;
+}
+namespace gpu {
+class GpuChannelHost;
+}
+namespace leveldb_env {
+class DBTracker;
+}
+namespace media {
+class AudioInputDevice;
+class AudioOutputDevice;
+class BlockingUrlProtocol;
+class PaintCanvasVideoRenderer;
+}
+namespace memory_instrumentation {
+class OSMetrics;
+}
+namespace midi {
+class TaskService; // https://crbug.com/796830
+}
+namespace module_installer {
+class ScopedAllowModulePakLoad;
+}
+namespace mojo {
+class CoreLibraryInitializer;
+class SyncCallRestrictions;
+namespace core {
+class ScopedIPCSupport;
+}
+}
+namespace printing {
+class PrintJobWorker;
+class PrinterQuery;
+}
+namespace rlz_lib {
+class FinancialPing;
+}
+namespace syncer {
+class GetLocalChangesRequest;
+class HttpBridge;
+class ModelSafeWorker;
+}
+namespace ui {
+class CommandBufferClientImpl;
+class CommandBufferLocal;
+class GpuState;
+class MaterialDesignController;
+}
+namespace weblayer {
+class WebLayerPathProvider;
+}
+namespace net {
+class MultiThreadedCertVerifierScopedAllowBaseSyncPrimitives;
+class MultiThreadedProxyResolverScopedAllowJoinOnIO;
+class NetworkChangeNotifierMac;
+class NetworkConfigWatcherMacThread;
+namespace internal {
+class AddressTrackerLinux;
+}
+}
+
+namespace proxy_resolver {
+class ScopedAllowThreadJoinForProxyResolverV8Tracing;
+}
+
+namespace remoting {
+class AutoThread;
+namespace protocol {
+class ScopedAllowThreadJoinForWebRtcTransport;
+}
+}
+
+namespace resource_coordinator {
+class TabManagerDelegate;
+}
+
+namespace service_manager {
+class ServiceProcessLauncher;
+}
+
+namespace shell_integration_linux {
+class LaunchXdgUtilityScopedAllowBaseSyncPrimitives;
+}
+
+namespace ui {
+class WindowResizeHelperMac;
+}
+
+namespace viz {
+class HostGpuMemoryBufferManager;
+}
+
+namespace vr {
+class VrShell;
+}
+
+namespace web {
+class WebMainLoop;
+class WebSubThread;
+}
+
+namespace weblayer {
+class ProfileImpl;
+}
+
+namespace webrtc {
+class DesktopConfigurationMonitor;
+}
+
+namespace base {
+
+namespace sequence_manager {
+namespace internal {
+class TaskQueueImpl;
+}
+} // namespace sequence_manager
+
+namespace android {
+class JavaHandlerThread;
+}
+
+namespace internal {
+class JobTaskSource;
+class TaskTracker;
+}
+
+class AdjustOOMScoreHelper;
+class FileDescriptorWatcher;
+class GetAppOutputScopedAllowBaseSyncPrimitives;
+class ScopedAllowThreadRecallForStackSamplingProfiler;
+class SimpleThread;
+class StackSamplingProfiler;
+class Thread;
+
+#if DCHECK_IS_ON()
+#define INLINE_IF_DCHECK_IS_OFF BASE_EXPORT
+#define EMPTY_BODY_IF_DCHECK_IS_OFF
+#else
+#define INLINE_IF_DCHECK_IS_OFF inline
+
+// The static_assert() eats follow-on semicolons. `= default` would work
+// too, but it makes clang realize that all the Scoped classes are no-ops in
+// non-dcheck builds and it starts emitting many -Wunused-variable warnings.
+#define EMPTY_BODY_IF_DCHECK_IS_OFF \
+ {} \
+ static_assert(true, "")
+#endif
+
+namespace internal {
+
+// Asserts that blocking calls are allowed in the current scope. This is an
+// internal call, external code should use ScopedBlockingCall instead, which
+// serves as a precise annotation of the scope that may/will block.
+INLINE_IF_DCHECK_IS_OFF void AssertBlockingAllowed()
+ EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+} // namespace internal
+
+// Disallows blocking on the current thread.
+INLINE_IF_DCHECK_IS_OFF void DisallowBlocking() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+// Disallows blocking calls within its scope.
+class BASE_EXPORT ScopedDisallowBlocking {
+ public:
+ ScopedDisallowBlocking() EMPTY_BODY_IF_DCHECK_IS_OFF;
+ ~ScopedDisallowBlocking() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+ private:
+#if DCHECK_IS_ON()
+ const bool was_disallowed_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedDisallowBlocking);
+};
+
+class BASE_EXPORT ScopedAllowBlocking {
+ private:
+ FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest, ScopedAllowBlocking);
+ friend class ScopedAllowBlockingForTesting;
+
+ // This can only be instantiated by friends. Use ScopedAllowBlockingForTesting
+ // in unit tests to avoid the friend requirement.
+ friend class AdjustOOMScoreHelper;
+ friend class android_webview::ScopedAllowInitGLBindings;
+ friend class content::BrowserProcessSubThread;
+ friend class content::RenderWidgetHostViewMac; // http://crbug.com/121917
+ friend class content::WebContentsViewMac;
+ friend class cronet::CronetPrefsManager;
+ friend class cronet::CronetURLRequestContext;
+ friend class memory_instrumentation::OSMetrics;
+ friend class module_installer::ScopedAllowModulePakLoad;
+ friend class mojo::CoreLibraryInitializer;
+ friend class printing::PrintJobWorker;
+ friend class resource_coordinator::TabManagerDelegate; // crbug.com/778703
+ friend class ui::MaterialDesignController;
+ friend class web::WebSubThread;
+ friend class StackSamplingProfiler;
+ friend class weblayer::ProfileImpl;
+ friend class content::RenderProcessHostImpl;
+ friend class weblayer::WebLayerPathProvider;
+
+ ScopedAllowBlocking() EMPTY_BODY_IF_DCHECK_IS_OFF;
+ ~ScopedAllowBlocking() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+#if DCHECK_IS_ON()
+ const bool was_disallowed_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAllowBlocking);
+};
+
+class ScopedAllowBlockingForTesting {
+ public:
+ ScopedAllowBlockingForTesting() {}
+ ~ScopedAllowBlockingForTesting() {}
+
+ private:
+#if DCHECK_IS_ON()
+ ScopedAllowBlocking scoped_allow_blocking_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAllowBlockingForTesting);
+};
+
+INLINE_IF_DCHECK_IS_OFF void DisallowBaseSyncPrimitives()
+ EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+class BASE_EXPORT ScopedAllowBaseSyncPrimitives {
+ private:
+ // This can only be instantiated by friends. Use
+ // ScopedAllowBaseSyncPrimitivesForTesting in unit tests to avoid the friend
+ // requirement.
+ FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest,
+ ScopedAllowBaseSyncPrimitives);
+ FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest,
+ ScopedAllowBaseSyncPrimitivesResetsState);
+ FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest,
+ ScopedAllowBaseSyncPrimitivesWithBlockingDisallowed);
+
+ // Allowed usage:
+ friend class SimpleThread;
+ friend class base::GetAppOutputScopedAllowBaseSyncPrimitives;
+ friend class blink::SourceStream;
+ friend class blink::WorkerThread;
+ friend class blink::scheduler::WorkerThread;
+ friend class chrome_cleaner::SystemReportComponent;
+ friend class content::BrowserMainLoop;
+ friend class content::BrowserProcessSubThread;
+ friend class content::ServiceWorkerContextClient;
+ friend class functions::ExecScriptScopedAllowBaseSyncPrimitives;
+ friend class history_report::HistoryReportJniBridge;
+ friend class internal::TaskTracker;
+ friend class leveldb_env::DBTracker;
+ friend class media::BlockingUrlProtocol;
+ friend class mojo::core::ScopedIPCSupport;
+ friend class net::MultiThreadedCertVerifierScopedAllowBaseSyncPrimitives;
+ friend class rlz_lib::FinancialPing;
+ friend class shell_integration_linux::
+ LaunchXdgUtilityScopedAllowBaseSyncPrimitives;
+ friend class syncer::HttpBridge;
+ friend class syncer::GetLocalChangesRequest;
+ friend class syncer::ModelSafeWorker;
+ friend class webrtc::DesktopConfigurationMonitor;
+
+ // Usage that should be fixed:
+ friend class ::NativeBackendKWallet; // http://crbug.com/125331
+ friend class ::chromeos::system::
+ StatisticsProviderImpl; // http://crbug.com/125385
+ friend class content::TextInputClientMac; // http://crbug.com/121917
+ friend class blink::VideoFrameResourceProvider; // http://crbug.com/878070
+
+ ScopedAllowBaseSyncPrimitives() EMPTY_BODY_IF_DCHECK_IS_OFF;
+ ~ScopedAllowBaseSyncPrimitives() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+#if DCHECK_IS_ON()
+ const bool was_disallowed_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAllowBaseSyncPrimitives);
+};
+
+class BASE_EXPORT ScopedAllowBaseSyncPrimitivesOutsideBlockingScope {
+ private:
+ // This can only be instantiated by friends. Use
+ // ScopedAllowBaseSyncPrimitivesForTesting in unit tests to avoid the friend
+ // requirement.
+ FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest,
+ ScopedAllowBaseSyncPrimitivesOutsideBlockingScope);
+ FRIEND_TEST_ALL_PREFIXES(
+ ThreadRestrictionsTest,
+ ScopedAllowBaseSyncPrimitivesOutsideBlockingScopeResetsState);
+
+ // Allowed usage:
+ friend class ::BrowserProcessImpl; // http://crbug.com/125207
+ friend class ::KeyStorageLinux;
+ friend class ::NativeDesktopMediaList;
+ friend class ::StartupTimeBomb;
+ friend class android::JavaHandlerThread;
+ friend class android_webview::
+ AwFormDatabaseService; // http://crbug.com/904431
+ friend class android_webview::CookieManager;
+ friend class android_webview::VizCompositorThreadRunnerWebView;
+ friend class audio::OutputDevice;
+ friend class base::sequence_manager::internal::TaskQueueImpl;
+ friend class base::FileDescriptorWatcher;
+ friend class base::internal::JobTaskSource;
+ friend class base::ScopedAllowThreadRecallForStackSamplingProfiler;
+ friend class base::StackSamplingProfiler;
+ friend class blink::RTCVideoDecoderAdapter;
+ friend class blink::RTCVideoEncoder;
+ friend class cc::TileTaskManagerImpl;
+ friend class content::CategorizedWorkerPool;
+ friend class content::DesktopCaptureDevice;
+ friend class content::InProcessUtilityThread;
+ friend class content::RTCVideoDecoder;
+ friend class content::SandboxHostLinux;
+ friend class content::ScopedAllowWaitForDebugURL;
+ friend class content::SynchronousCompositor;
+ friend class content::SynchronousCompositorHost;
+ friend class content::SynchronousCompositorSyncCallBridge;
+ friend class media::AudioInputDevice;
+ friend class media::AudioOutputDevice;
+ friend class media::PaintCanvasVideoRenderer;
+ friend class mojo::SyncCallRestrictions;
+ friend class net::NetworkConfigWatcherMacThread;
+ friend class viz::HostGpuMemoryBufferManager;
+ friend class vr::VrShell;
+
+ // Usage that should be fixed:
+ friend class ::chromeos::BlockingMethodCaller; // http://crbug.com/125360
+ friend class base::Thread; // http://crbug.com/918039
+ friend class cc::CompletionEvent; // http://crbug.com/902653
+ friend class content::
+ BrowserGpuChannelHostFactory; // http://crbug.com/125248
+ friend class dbus::Bus; // http://crbug.com/125222
+ friend class disk_cache::BackendImpl; // http://crbug.com/74623
+ friend class disk_cache::InFlightIO; // http://crbug.com/74623
+ friend class gpu::GpuChannelHost; // http://crbug.com/125264
+ friend class remoting::protocol::
+ ScopedAllowThreadJoinForWebRtcTransport; // http://crbug.com/660081
+ friend class midi::TaskService; // https://crbug.com/796830
+ friend class net::internal::AddressTrackerLinux; // http://crbug.com/125097
+ friend class net::
+ MultiThreadedProxyResolverScopedAllowJoinOnIO; // http://crbug.com/69710
+ friend class net::NetworkChangeNotifierMac; // http://crbug.com/125097
+ friend class printing::PrinterQuery; // http://crbug.com/66082
+ friend class proxy_resolver::
+ ScopedAllowThreadJoinForProxyResolverV8Tracing; // http://crbug.com/69710
+ friend class remoting::AutoThread; // https://crbug.com/944316
+ // Not used in production yet, https://crbug.com/844078.
+ friend class service_manager::ServiceProcessLauncher;
+ friend class ui::WindowResizeHelperMac; // http://crbug.com/902829
+
+ ScopedAllowBaseSyncPrimitivesOutsideBlockingScope()
+ EMPTY_BODY_IF_DCHECK_IS_OFF;
+ ~ScopedAllowBaseSyncPrimitivesOutsideBlockingScope()
+ EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+#if DCHECK_IS_ON()
+ const bool was_disallowed_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAllowBaseSyncPrimitivesOutsideBlockingScope);
+};
+
+class BASE_EXPORT ScopedAllowBaseSyncPrimitivesForTesting {
+ public:
+ ScopedAllowBaseSyncPrimitivesForTesting() EMPTY_BODY_IF_DCHECK_IS_OFF;
+ ~ScopedAllowBaseSyncPrimitivesForTesting() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+ private:
+#if DCHECK_IS_ON()
+ const bool was_disallowed_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAllowBaseSyncPrimitivesForTesting);
+};
+
+// Counterpart to base::DisallowUnresponsiveTasks() for tests to allow them to
+// block their thread after it was banned.
+class BASE_EXPORT ScopedAllowUnresponsiveTasksForTesting {
+ public:
+ ScopedAllowUnresponsiveTasksForTesting() EMPTY_BODY_IF_DCHECK_IS_OFF;
+ ~ScopedAllowUnresponsiveTasksForTesting() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+ private:
+#if DCHECK_IS_ON()
+ const bool was_disallowed_base_sync_;
+ const bool was_disallowed_blocking_;
+ const bool was_disallowed_cpu_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAllowUnresponsiveTasksForTesting);
+};
+
+namespace internal {
+
+// Asserts that waiting on a //base sync primitive is allowed in the current
+// scope.
+INLINE_IF_DCHECK_IS_OFF void AssertBaseSyncPrimitivesAllowed()
+ EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+// Resets all thread restrictions on the current thread.
+INLINE_IF_DCHECK_IS_OFF void ResetThreadRestrictionsForTesting()
+ EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+} // namespace internal
+
+// Asserts that running long CPU work is allowed in the current scope.
+INLINE_IF_DCHECK_IS_OFF void AssertLongCPUWorkAllowed()
+ EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+INLINE_IF_DCHECK_IS_OFF void DisallowUnresponsiveTasks()
+ EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+class BASE_EXPORT ThreadRestrictions {
+ public:
+ // Constructing a ScopedAllowIO temporarily allows IO for the current
+ // thread. Doing this is almost certainly always incorrect.
+ //
+ // DEPRECATED. Use ScopedAllowBlocking(ForTesting).
+ class BASE_EXPORT ScopedAllowIO {
+ public:
+ ScopedAllowIO() EMPTY_BODY_IF_DCHECK_IS_OFF;
+ ~ScopedAllowIO() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+ private:
+#if DCHECK_IS_ON()
+ const bool was_allowed_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAllowIO);
+ };
+
+#if DCHECK_IS_ON()
+ // Set whether the current thread is allowed to make IO calls.
+ // Threads start out in the *allowed* state.
+ // Returns the previous value.
+ //
+ // DEPRECATED. Use ScopedAllowBlocking(ForTesting) or ScopedDisallowBlocking.
+ static bool SetIOAllowed(bool allowed);
+
+ // Set whether the current thread can use singletons. Returns the previous
+ // value.
+ static bool SetSingletonAllowed(bool allowed);
+
+ // Check whether the current thread is allowed to use singletons (Singleton /
+ // LazyInstance). DCHECKs if not.
+ static void AssertSingletonAllowed();
+
+ // Disable waiting on the current thread. Threads start out in the *allowed*
+ // state. Returns the previous value.
+ //
+ // DEPRECATED. Use DisallowBaseSyncPrimitives.
+ static void DisallowWaiting();
+#else
+ // Inline the empty definitions of these functions so that they can be
+ // compiled out.
+ static bool SetIOAllowed(bool allowed) { return true; }
+ static bool SetSingletonAllowed(bool allowed) { return true; }
+ static void AssertSingletonAllowed() {}
+ static void DisallowWaiting() {}
+#endif
+
+ private:
+ // DO NOT ADD ANY OTHER FRIEND STATEMENTS.
+ // BEGIN ALLOWED USAGE.
+ friend class content::BrowserMainLoop;
+ friend class content::BrowserShutdownProfileDumper;
+ friend class content::BrowserTestBase;
+ friend class content::ScopedAllowWaitForDebugURL;
+ friend class ::HistogramSynchronizer;
+ friend class internal::TaskTracker;
+ friend class web::WebMainLoop;
+ friend class MessagePumpDefault;
+ friend class PlatformThread;
+ friend class ui::CommandBufferClientImpl;
+ friend class ui::CommandBufferLocal;
+ friend class ui::GpuState;
+
+ // END ALLOWED USAGE.
+ // BEGIN USAGE THAT NEEDS TO BE FIXED.
+ friend class chrome_browser_net::Predictor; // http://crbug.com/78451
+#if !defined(OFFICIAL_BUILD)
+ friend class content::SoftwareOutputDeviceMus; // Interim non-production code
+#endif
+// END USAGE THAT NEEDS TO BE FIXED.
+
+#if DCHECK_IS_ON()
+ // DEPRECATED. Use ScopedAllowBaseSyncPrimitives.
+ static bool SetWaitAllowed(bool allowed);
+#else
+ static bool SetWaitAllowed(bool allowed) { return true; }
+#endif
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ThreadRestrictions);
+};
+
+#undef INLINE_IF_DCHECK_IS_OFF
+#undef EMPTY_BODY_IF_DCHECK_IS_OFF
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_RESTRICTIONS_H_
diff --git a/security/sandbox/chromium/base/time/time.cc b/security/sandbox/chromium/base/time/time.cc
new file mode 100644
index 0000000000..1293cdcaad
--- /dev/null
+++ b/security/sandbox/chromium/base/time/time.cc
@@ -0,0 +1,433 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <cmath>
+#include <ios>
+#include <limits>
+#include <ostream>
+#include <sstream>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/no_destructor.h"
+#include "base/strings/stringprintf.h"
+#include "base/third_party/nspr/prtime.h"
+#include "base/time/time_override.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace internal {
+
+TimeNowFunction g_time_now_function = &subtle::TimeNowIgnoringOverride;
+
+TimeNowFunction g_time_now_from_system_time_function =
+ &subtle::TimeNowFromSystemTimeIgnoringOverride;
+
+TimeTicksNowFunction g_time_ticks_now_function =
+ &subtle::TimeTicksNowIgnoringOverride;
+
+ThreadTicksNowFunction g_thread_ticks_now_function =
+ &subtle::ThreadTicksNowIgnoringOverride;
+
+} // namespace internal
+
+// TimeDelta ------------------------------------------------------------------
+
+int TimeDelta::InDays() const {
+ if (is_max()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int>::max();
+ }
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+}
+
+int TimeDelta::InDaysFloored() const {
+ if (is_max()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int>::max();
+ }
+ int result = delta_ / Time::kMicrosecondsPerDay;
+ int64_t remainder = delta_ - (result * Time::kMicrosecondsPerDay);
+ if (remainder < 0) {
+ --result; // Use floor(), not trunc() rounding behavior.
+ }
+ return result;
+}
+
+int TimeDelta::InHours() const {
+ if (is_max()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int>::max();
+ }
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+int TimeDelta::InMinutes() const {
+ if (is_max()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int>::max();
+ }
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+double TimeDelta::InSecondsF() const {
+ if (is_max()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<double>::infinity();
+ }
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+}
+
+int64_t TimeDelta::InSeconds() const {
+ if (is_max()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
+ return delta_ / Time::kMicrosecondsPerSecond;
+}
+
+double TimeDelta::InMillisecondsF() const {
+ if (is_max()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<double>::infinity();
+ }
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMilliseconds() const {
+ if (is_max()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
+ return delta_ / Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMillisecondsRoundedUp() const {
+ if (is_max()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
+ int64_t result = delta_ / Time::kMicrosecondsPerMillisecond;
+ int64_t remainder = delta_ - (result * Time::kMicrosecondsPerMillisecond);
+ if (remainder > 0) {
+ ++result; // Use ceil(), not trunc() rounding behavior.
+ }
+ return result;
+}
+
+double TimeDelta::InMicrosecondsF() const {
+ if (is_max()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<double>::infinity();
+ }
+ return static_cast<double>(delta_);
+}
+
+int64_t TimeDelta::InNanoseconds() const {
+ if (is_max()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
+ return delta_ * Time::kNanosecondsPerMicrosecond;
+}
+
+std::ostream& operator<<(std::ostream& os, TimeDelta time_delta) {
+ return os << time_delta.InSecondsF() << " s";
+}
+
+// Time -----------------------------------------------------------------------
+
+// static
+Time Time::Now() {
+ return internal::g_time_now_function();
+}
+
+// static
+Time Time::NowFromSystemTime() {
+ // Just use g_time_now_function because it returns the system time.
+ return internal::g_time_now_from_system_time_function();
+}
+
+// static
+Time Time::FromDeltaSinceWindowsEpoch(TimeDelta delta) {
+ return Time(delta.InMicroseconds());
+}
+
+TimeDelta Time::ToDeltaSinceWindowsEpoch() const {
+ return TimeDelta::FromMicroseconds(us_);
+}
+
+// static
+Time Time::FromTimeT(time_t tt) {
+ if (tt == 0)
+ return Time(); // Preserve 0 so we can tell it doesn't exist.
+ if (tt == std::numeric_limits<time_t>::max())
+ return Max();
+ return Time(kTimeTToMicrosecondsOffset) + TimeDelta::FromSeconds(tt);
+}
+
+time_t Time::ToTimeT() const {
+ if (is_null())
+ return 0; // Preserve 0 so we can tell it doesn't exist.
+ if (is_max()) {
+ // Preserve max without offset to prevent overflow.
+ return std::numeric_limits<time_t>::max();
+ }
+ if (std::numeric_limits<int64_t>::max() - kTimeTToMicrosecondsOffset <= us_) {
+ DLOG(WARNING) << "Overflow when converting base::Time with internal " <<
+ "value " << us_ << " to time_t.";
+ return std::numeric_limits<time_t>::max();
+ }
+ return (us_ - kTimeTToMicrosecondsOffset) / kMicrosecondsPerSecond;
+}
+
+// static
+Time Time::FromDoubleT(double dt) {
+ if (dt == 0 || std::isnan(dt))
+ return Time(); // Preserve 0 so we can tell it doesn't exist.
+ return Time(kTimeTToMicrosecondsOffset) + TimeDelta::FromSecondsD(dt);
+}
+
+double Time::ToDoubleT() const {
+ if (is_null())
+ return 0; // Preserve 0 so we can tell it doesn't exist.
+ if (is_max()) {
+ // Preserve max without offset to prevent overflow.
+ return std::numeric_limits<double>::infinity();
+ }
+ return (static_cast<double>(us_ - kTimeTToMicrosecondsOffset) /
+ static_cast<double>(kMicrosecondsPerSecond));
+}
+
+#if defined(OS_POSIX)
+// static
+Time Time::FromTimeSpec(const timespec& ts) {
+ return FromDoubleT(ts.tv_sec +
+ static_cast<double>(ts.tv_nsec) /
+ base::Time::kNanosecondsPerSecond);
+}
+#endif
+
+// static
+Time Time::FromJsTime(double ms_since_epoch) {
+ // The epoch is a valid time, so this constructor doesn't interpret
+ // 0 as the null time.
+ return Time(kTimeTToMicrosecondsOffset) +
+ TimeDelta::FromMillisecondsD(ms_since_epoch);
+}
+
+double Time::ToJsTime() const {
+ if (is_null()) {
+ // Preserve 0 so the invalid result doesn't depend on the platform.
+ return 0;
+ }
+ return ToJsTimeIgnoringNull();
+}
+
+double Time::ToJsTimeIgnoringNull() const {
+ if (is_max()) {
+ // Preserve max without offset to prevent overflow.
+ return std::numeric_limits<double>::infinity();
+ }
+ return (static_cast<double>(us_ - kTimeTToMicrosecondsOffset) /
+ kMicrosecondsPerMillisecond);
+}
+
+Time Time::FromJavaTime(int64_t ms_since_epoch) {
+ return base::Time::UnixEpoch() +
+ base::TimeDelta::FromMilliseconds(ms_since_epoch);
+}
+
+int64_t Time::ToJavaTime() const {
+ if (is_null()) {
+ // Preserve 0 so the invalid result doesn't depend on the platform.
+ return 0;
+ }
+ if (is_max()) {
+ // Preserve max without offset to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
+ return ((us_ - kTimeTToMicrosecondsOffset) /
+ kMicrosecondsPerMillisecond);
+}
+
+// static
+Time Time::UnixEpoch() {
+ Time time;
+ time.us_ = kTimeTToMicrosecondsOffset;
+ return time;
+}
+
+Time Time::Midnight(bool is_local) const {
+ Exploded exploded;
+ Explode(is_local, &exploded);
+ exploded.hour = 0;
+ exploded.minute = 0;
+ exploded.second = 0;
+ exploded.millisecond = 0;
+ Time out_time;
+ if (FromExploded(is_local, exploded, &out_time)) {
+ return out_time;
+ } else if (is_local) {
+ // Hitting this branch means 00:00:00am of the current day
+ // does not exist (due to Daylight Saving Time in some countries
+ // where clocks are shifted at midnight). In this case, midnight
+ // should be defined as 01:00:00am.
+ exploded.hour = 1;
+ if (FromExploded(is_local, exploded, &out_time))
+ return out_time;
+ }
+ // This function must not fail.
+ NOTREACHED();
+ return Time();
+}
+
+#if !defined(MOZ_SANDBOX)
+// static
+bool Time::FromStringInternal(const char* time_string,
+ bool is_local,
+ Time* parsed_time) {
+ DCHECK((time_string != nullptr) && (parsed_time != nullptr));
+
+ if (time_string[0] == '\0')
+ return false;
+
+ PRTime result_time = 0;
+ PRStatus result = PR_ParseTimeString(time_string,
+ is_local ? PR_FALSE : PR_TRUE,
+ &result_time);
+ if (PR_SUCCESS != result)
+ return false;
+
+ result_time += kTimeTToMicrosecondsOffset;
+ *parsed_time = Time(result_time);
+ return true;
+}
+#endif
+
+// static
+bool Time::ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs) {
+ return lhs.year == rhs.year && lhs.month == rhs.month &&
+ lhs.day_of_month == rhs.day_of_month && lhs.hour == rhs.hour &&
+ lhs.minute == rhs.minute && lhs.second == rhs.second &&
+ lhs.millisecond == rhs.millisecond;
+}
+
+// static
+bool Time::FromMillisecondsSinceUnixEpoch(int64_t unix_milliseconds,
+ Time* time) {
+ // Adjust the provided time from milliseconds since the Unix epoch (1970) to
+ // microseconds since the Windows epoch (1601), avoiding overflows.
+ base::CheckedNumeric<int64_t> checked_microseconds_win_epoch =
+ unix_milliseconds;
+ checked_microseconds_win_epoch *= kMicrosecondsPerMillisecond;
+ checked_microseconds_win_epoch += kTimeTToMicrosecondsOffset;
+ if (!checked_microseconds_win_epoch.IsValid()) {
+ *time = base::Time(0);
+ return false;
+ }
+
+ *time = Time(checked_microseconds_win_epoch.ValueOrDie());
+ return true;
+}
+
+int64_t Time::ToRoundedDownMillisecondsSinceUnixEpoch() const {
+ // Adjust from Windows epoch (1601) to Unix epoch (1970).
+ int64_t microseconds = us_ - kTimeTToMicrosecondsOffset;
+
+ // Round the microseconds towards -infinity.
+ if (microseconds >= 0) {
+ // In this case, rounding towards -infinity means rounding towards 0.
+ return microseconds / kMicrosecondsPerMillisecond;
+ } else {
+ return (microseconds - kMicrosecondsPerMillisecond + 1) /
+ kMicrosecondsPerMillisecond;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, Time time) {
+ Time::Exploded exploded;
+ time.UTCExplode(&exploded);
+ // Use StringPrintf because iostreams formatting is painful.
+ return os << StringPrintf("%04d-%02d-%02d %02d:%02d:%02d.%03d UTC",
+ exploded.year,
+ exploded.month,
+ exploded.day_of_month,
+ exploded.hour,
+ exploded.minute,
+ exploded.second,
+ exploded.millisecond);
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+// static
+TimeTicks TimeTicks::Now() {
+ return internal::g_time_ticks_now_function();
+}
+
+// static
+TimeTicks TimeTicks::UnixEpoch() {
+ static const base::NoDestructor<base::TimeTicks> epoch([]() {
+ return subtle::TimeTicksNowIgnoringOverride() -
+ (subtle::TimeNowIgnoringOverride() - Time::UnixEpoch());
+ }());
+ return *epoch;
+}
+
+TimeTicks TimeTicks::SnappedToNextTick(TimeTicks tick_phase,
+ TimeDelta tick_interval) const {
+ // |interval_offset| is the offset from |this| to the next multiple of
+ // |tick_interval| after |tick_phase|, possibly negative if in the past.
+ TimeDelta interval_offset = (tick_phase - *this) % tick_interval;
+ // If |this| is exactly on the interval (i.e. offset==0), don't adjust.
+ // Otherwise, if |tick_phase| was in the past, adjust forward to the next
+ // tick after |this|.
+ if (!interval_offset.is_zero() && tick_phase < *this)
+ interval_offset += tick_interval;
+ return *this + interval_offset;
+}
+
+std::ostream& operator<<(std::ostream& os, TimeTicks time_ticks) {
+ // This function formats a TimeTicks object as "bogo-microseconds".
+ // The origin and granularity of the count are platform-specific, and may vary
+ // from run to run. Although bogo-microseconds usually roughly correspond to
+ // real microseconds, the only real guarantee is that the number never goes
+ // down during a single run.
+ const TimeDelta as_time_delta = time_ticks - TimeTicks();
+ return os << as_time_delta.InMicroseconds() << " bogo-microseconds";
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+// static
+ThreadTicks ThreadTicks::Now() {
+ return internal::g_thread_ticks_now_function();
+}
+
+std::ostream& operator<<(std::ostream& os, ThreadTicks thread_ticks) {
+ const TimeDelta as_time_delta = thread_ticks - ThreadTicks();
+ return os << as_time_delta.InMicroseconds() << " bogo-thread-microseconds";
+}
+
+// Time::Exploded -------------------------------------------------------------
+
+inline bool is_in_range(int value, int lo, int hi) {
+ return lo <= value && value <= hi;
+}
+
+bool Time::Exploded::HasValidValues() const {
+ return is_in_range(month, 1, 12) &&
+ is_in_range(day_of_week, 0, 6) &&
+ is_in_range(day_of_month, 1, 31) &&
+ is_in_range(hour, 0, 23) &&
+ is_in_range(minute, 0, 59) &&
+ is_in_range(second, 0, 60) &&
+ is_in_range(millisecond, 0, 999);
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/time/time.h b/security/sandbox/chromium/base/time/time.h
new file mode 100644
index 0000000000..7214e000f0
--- /dev/null
+++ b/security/sandbox/chromium/base/time/time.h
@@ -0,0 +1,1077 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Time represents an absolute point in coordinated universal time (UTC),
+// internally represented as microseconds (s/1,000,000) since the Windows epoch
+// (1601-01-01 00:00:00 UTC). System-dependent clock interface routines are
+// defined in time_PLATFORM.cc. Note that values for Time may skew and jump
+// around as the operating system makes adjustments to synchronize (e.g., with
+// NTP servers). Thus, client code that uses the Time class must account for
+// this.
+//
+// TimeDelta represents a duration of time, internally represented in
+// microseconds.
+//
+// TimeTicks and ThreadTicks represent an abstract time that is most of the time
+// incrementing, for use in measuring time durations. Internally, they are
+// represented in microseconds. They cannot be converted to a human-readable
+// time, but are guaranteed not to decrease (unlike the Time class). Note that
+// TimeTicks may "stand still" (e.g., if the computer is suspended), and
+// ThreadTicks will "stand still" whenever the thread has been de-scheduled by
+// the operating system.
+//
+// All time classes are copyable, assignable, and occupy 64-bits per instance.
+// As a result, prefer passing them by value:
+// void MyFunction(TimeDelta arg);
+// If circumstances require, you may also pass by const reference:
+// void MyFunction(const TimeDelta& arg); // Not preferred.
+//
+// Definitions of operator<< are provided to make these types work with
+// DCHECK_EQ() and other log macros. For human-readable formatting, see
+// "base/i18n/time_formatting.h".
+//
+// So many choices! Which time class should you use? Examples:
+//
+// Time: Interpreting the wall-clock time provided by a remote system.
+// Detecting whether cached resources have expired. Providing the
+// user with a display of the current date and time. Determining
+// the amount of time between events across re-boots of the
+// machine.
+//
+// TimeTicks: Tracking the amount of time a task runs. Executing delayed
+// tasks at the right time. Computing presentation timestamps.
+// Synchronizing audio and video using TimeTicks as a common
+// reference clock (lip-sync). Measuring network round-trip
+// latency.
+//
+// ThreadTicks: Benchmarking how long the current thread has been doing actual
+// work.
+
+#ifndef BASE_TIME_TIME_H_
+#define BASE_TIME_TIME_H_
+
+#include <stdint.h>
+#include <time.h>
+
+#include <iosfwd>
+#include <limits>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/numerics/safe_math.h"
+#include "build/build_config.h"
+
+#if defined(OS_FUCHSIA)
+#include <zircon/types.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include <CoreFoundation/CoreFoundation.h>
+// Avoid Mac system header macro leak.
+#undef TYPE_BOOL
+#endif
+
+#if defined(OS_ANDROID)
+#include <jni.h>
+#endif
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <unistd.h>
+#include <sys/time.h>
+#endif
+
+#if defined(OS_WIN)
+#include "base/gtest_prod_util.h"
+#include "base/win/windows_types.h"
+#endif
+
+namespace ABI {
+namespace Windows {
+namespace Foundation {
+struct DateTime;
+} // namespace Foundation
+} // namespace Windows
+} // namespace ABI
+
+namespace base {
+
+class PlatformThreadHandle;
+class TimeDelta;
+
+// The functions in the time_internal namespace are meant to be used only by the
+// time classes and functions. Please use the math operators defined in the
+// time classes instead.
+namespace time_internal {
+
+// Add or subtract a TimeDelta from |value|. TimeDelta::Min()/Max() are treated
+// as infinity and will always saturate the return value (infinity math applies
+// if |value| also is at either limit of its spectrum). The int64_t argument and
+// return value are in terms of a microsecond timebase.
+BASE_EXPORT constexpr int64_t SaturatedAdd(int64_t value, TimeDelta delta);
+BASE_EXPORT constexpr int64_t SaturatedSub(int64_t value, TimeDelta delta);
+
+} // namespace time_internal
+
+// TimeDelta ------------------------------------------------------------------
+
+class BASE_EXPORT TimeDelta {
+ public:
+ constexpr TimeDelta() : delta_(0) {}
+
+ // Converts units of time to TimeDeltas.
+ // WARNING: Floating point arithmetic is such that FromXXXD(t.InXXXF()) may
+ // not precisely equal |t|. Hence, floating point values should not be used
+ // for storage.
+ static constexpr TimeDelta FromDays(int days);
+ static constexpr TimeDelta FromHours(int hours);
+ static constexpr TimeDelta FromMinutes(int minutes);
+ static constexpr TimeDelta FromSeconds(int64_t secs);
+ static constexpr TimeDelta FromMilliseconds(int64_t ms);
+ static constexpr TimeDelta FromMicroseconds(int64_t us);
+ static constexpr TimeDelta FromNanoseconds(int64_t ns);
+ static constexpr TimeDelta FromSecondsD(double secs);
+ static constexpr TimeDelta FromMillisecondsD(double ms);
+ static constexpr TimeDelta FromMicrosecondsD(double us);
+ static constexpr TimeDelta FromNanosecondsD(double ns);
+#if defined(OS_WIN)
+ static TimeDelta FromQPCValue(LONGLONG qpc_value);
+ // TODO(crbug.com/989694): Avoid base::TimeDelta factory functions
+ // based on absolute time
+ static TimeDelta FromFileTime(FILETIME ft);
+ static TimeDelta FromWinrtDateTime(ABI::Windows::Foundation::DateTime dt);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ static TimeDelta FromTimeSpec(const timespec& ts);
+#endif
+#if defined(OS_FUCHSIA)
+ static TimeDelta FromZxDuration(zx_duration_t nanos);
+#endif
+
+ // Converts an integer value representing TimeDelta to a class. This is used
+ // when deserializing a |TimeDelta| structure, using a value known to be
+ // compatible. It is not provided as a constructor because the integer type
+ // may be unclear from the perspective of a caller.
+ //
+ // DEPRECATED - Do not use in new code. http://crbug.com/634507
+ static constexpr TimeDelta FromInternalValue(int64_t delta) {
+ return TimeDelta(delta);
+ }
+
+ // Returns the maximum time delta, which should be greater than any reasonable
+ // time delta we might compare it to. Adding or subtracting the maximum time
+ // delta to a time or another time delta has an undefined result.
+ static constexpr TimeDelta Max();
+
+  // Returns the minimum time delta, which should be less than any
+ // reasonable time delta we might compare it to. Adding or subtracting the
+ // minimum time delta to a time or another time delta has an undefined result.
+ static constexpr TimeDelta Min();
+
+ // Returns the internal numeric value of the TimeDelta object. Please don't
+ // use this and do arithmetic on it, as it is more error prone than using the
+ // provided operators.
+ // For serializing, use FromInternalValue to reconstitute.
+ //
+ // DEPRECATED - Do not use in new code. http://crbug.com/634507
+ constexpr int64_t ToInternalValue() const { return delta_; }
+
+ // Returns the magnitude (absolute value) of this TimeDelta.
+ constexpr TimeDelta magnitude() const {
+ // Some toolchains provide an incomplete C++11 implementation and lack an
+ // int64_t overload for std::abs(). The following is a simple branchless
+ // implementation:
+ const int64_t mask = delta_ >> (sizeof(delta_) * 8 - 1);
+ return TimeDelta((delta_ + mask) ^ mask);
+ }
+
+ // Returns true if the time delta is zero.
+ constexpr bool is_zero() const { return delta_ == 0; }
+
+ // Returns true if the time delta is the maximum/minimum time delta.
+ constexpr bool is_max() const {
+ return delta_ == std::numeric_limits<int64_t>::max();
+ }
+ constexpr bool is_min() const {
+ return delta_ == std::numeric_limits<int64_t>::min();
+ }
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+ struct timespec ToTimeSpec() const;
+#endif
+#if defined(OS_FUCHSIA)
+ zx_duration_t ToZxDuration() const;
+#endif
+#if defined(OS_WIN)
+ ABI::Windows::Foundation::DateTime ToWinrtDateTime() const;
+#endif
+
+ // Returns the time delta in some unit. The InXYZF versions return a floating
+ // point value. The InXYZ versions return a truncated value (aka rounded
+ // towards zero, std::trunc() behavior). The InXYZFloored() versions round to
+ // lesser integers (std::floor() behavior). The XYZRoundedUp() versions round
+ // up to greater integers (std::ceil() behavior).
+ // WARNING: Floating point arithmetic is such that FromXXXD(t.InXXXF()) may
+ // not precisely equal |t|. Hence, floating point values should not be used
+ // for storage.
+ int InDays() const;
+ int InDaysFloored() const;
+ int InHours() const;
+ int InMinutes() const;
+ double InSecondsF() const;
+ int64_t InSeconds() const;
+ double InMillisecondsF() const;
+ int64_t InMilliseconds() const;
+ int64_t InMillisecondsRoundedUp() const;
+ constexpr int64_t InMicroseconds() const { return delta_; }
+ double InMicrosecondsF() const;
+ int64_t InNanoseconds() const;
+
+ // Computations with other deltas.
+ constexpr TimeDelta operator+(TimeDelta other) const {
+ return TimeDelta(time_internal::SaturatedAdd(delta_, other));
+ }
+ constexpr TimeDelta operator-(TimeDelta other) const {
+ return TimeDelta(time_internal::SaturatedSub(delta_, other));
+ }
+
+ constexpr TimeDelta& operator+=(TimeDelta other) {
+ return *this = (*this + other);
+ }
+ constexpr TimeDelta& operator-=(TimeDelta other) {
+ return *this = (*this - other);
+ }
+ constexpr TimeDelta operator-() const { return TimeDelta(-delta_); }
+
+ // Computations with numeric types.
+ template <typename T>
+ constexpr TimeDelta operator*(T a) const {
+ CheckedNumeric<int64_t> rv(delta_);
+ rv *= a;
+ if (rv.IsValid())
+ return TimeDelta(rv.ValueOrDie());
+ // Matched sign overflows. Mismatched sign underflows.
+ if ((delta_ < 0) ^ (a < 0))
+ return TimeDelta(std::numeric_limits<int64_t>::min());
+ return TimeDelta(std::numeric_limits<int64_t>::max());
+ }
+ template <typename T>
+ constexpr TimeDelta operator/(T a) const {
+ CheckedNumeric<int64_t> rv(delta_);
+ rv /= a;
+ if (rv.IsValid())
+ return TimeDelta(rv.ValueOrDie());
+ // Matched sign overflows. Mismatched sign underflows.
+ // Special case to catch divide by zero.
+ if ((delta_ < 0) ^ (a <= 0))
+ return TimeDelta(std::numeric_limits<int64_t>::min());
+ return TimeDelta(std::numeric_limits<int64_t>::max());
+ }
+ template <typename T>
+ constexpr TimeDelta& operator*=(T a) {
+ return *this = (*this * a);
+ }
+ template <typename T>
+ constexpr TimeDelta& operator/=(T a) {
+ return *this = (*this / a);
+ }
+
+ constexpr int64_t operator/(TimeDelta a) const { return delta_ / a.delta_; }
+
+ constexpr TimeDelta operator%(TimeDelta a) const {
+ return TimeDelta(delta_ % a.delta_);
+ }
+ TimeDelta& operator%=(TimeDelta other) { return *this = (*this % other); }
+
+ // Comparison operators.
+ constexpr bool operator==(TimeDelta other) const {
+ return delta_ == other.delta_;
+ }
+ constexpr bool operator!=(TimeDelta other) const {
+ return delta_ != other.delta_;
+ }
+ constexpr bool operator<(TimeDelta other) const {
+ return delta_ < other.delta_;
+ }
+ constexpr bool operator<=(TimeDelta other) const {
+ return delta_ <= other.delta_;
+ }
+ constexpr bool operator>(TimeDelta other) const {
+ return delta_ > other.delta_;
+ }
+ constexpr bool operator>=(TimeDelta other) const {
+ return delta_ >= other.delta_;
+ }
+
+ private:
+ friend constexpr int64_t time_internal::SaturatedAdd(int64_t value,
+ TimeDelta delta);
+ friend constexpr int64_t time_internal::SaturatedSub(int64_t value,
+ TimeDelta delta);
+
+ // Constructs a delta given the duration in microseconds. This is private
+ // to avoid confusion by callers with an integer constructor. Use
+ // FromSeconds, FromMilliseconds, etc. instead.
+ constexpr explicit TimeDelta(int64_t delta_us) : delta_(delta_us) {}
+
+ // Private method to build a delta from a double.
+ static constexpr TimeDelta FromDouble(double value);
+
+ // Private method to build a delta from the product of a user-provided value
+ // and a known-positive value.
+ static constexpr TimeDelta FromProduct(int64_t value, int64_t positive_value);
+
+ // Delta in microseconds.
+ int64_t delta_;
+};
+
+template <typename T>
+constexpr TimeDelta operator*(T a, TimeDelta td) {
+ return td * a;
+}
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, TimeDelta time_delta);
+
+// Do not reference the time_internal::TimeBase template class directly. Please
+// use one of the time subclasses instead, and only reference the public
+// TimeBase members via those classes.
+namespace time_internal {
+
+constexpr int64_t SaturatedAdd(int64_t value, TimeDelta delta) {
+ // Treat Min/Max() as +/- infinity (additions involving two infinities are
+ // only valid if signs match).
+ if (delta.is_max()) {
+ CHECK_GT(value, std::numeric_limits<int64_t>::min());
+ return std::numeric_limits<int64_t>::max();
+ } else if (delta.is_min()) {
+ CHECK_LT(value, std::numeric_limits<int64_t>::max());
+ return std::numeric_limits<int64_t>::min();
+ }
+
+ return base::ClampAdd(value, delta.delta_);
+}
+
+constexpr int64_t SaturatedSub(int64_t value, TimeDelta delta) {
+ // Treat Min/Max() as +/- infinity (subtractions involving two infinities are
+ // only valid if signs are opposite).
+ if (delta.is_max()) {
+ CHECK_LT(value, std::numeric_limits<int64_t>::max());
+ return std::numeric_limits<int64_t>::min();
+ } else if (delta.is_min()) {
+ CHECK_GT(value, std::numeric_limits<int64_t>::min());
+ return std::numeric_limits<int64_t>::max();
+ }
+
+ return base::ClampSub(value, delta.delta_);
+}
+
+// TimeBase--------------------------------------------------------------------
+
+// Provides value storage and comparison/math operations common to all time
+// classes. Each subclass provides for strong type-checking to ensure
+// semantically meaningful comparison/math of time values from the same clock
+// source or timeline.
+template<class TimeClass>
+class TimeBase {
+ public:
+ static constexpr int64_t kHoursPerDay = 24;
+ static constexpr int64_t kSecondsPerMinute = 60;
+ static constexpr int64_t kSecondsPerHour = 60 * kSecondsPerMinute;
+ static constexpr int64_t kMillisecondsPerSecond = 1000;
+ static constexpr int64_t kMillisecondsPerDay =
+ kMillisecondsPerSecond * 60 * 60 * kHoursPerDay;
+ static constexpr int64_t kMicrosecondsPerMillisecond = 1000;
+ static constexpr int64_t kMicrosecondsPerSecond =
+ kMicrosecondsPerMillisecond * kMillisecondsPerSecond;
+ static constexpr int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+ static constexpr int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+ static constexpr int64_t kMicrosecondsPerDay =
+ kMicrosecondsPerHour * kHoursPerDay;
+ static constexpr int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+ static constexpr int64_t kNanosecondsPerMicrosecond = 1000;
+ static constexpr int64_t kNanosecondsPerSecond =
+ kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
+
+ // Returns true if this object has not been initialized.
+ //
+ // Warning: Be careful when writing code that performs math on time values,
+ // since it's possible to produce a valid "zero" result that should not be
+ // interpreted as a "null" value.
+ constexpr bool is_null() const { return us_ == 0; }
+
+ // Returns true if this object represents the maximum/minimum time.
+ constexpr bool is_max() const {
+ return us_ == std::numeric_limits<int64_t>::max();
+ }
+ constexpr bool is_min() const {
+ return us_ == std::numeric_limits<int64_t>::min();
+ }
+
+  // Returns the maximum/minimum times, which should be greater/less than
+ // any reasonable time with which we might compare it.
+ static constexpr TimeClass Max() {
+ return TimeClass(std::numeric_limits<int64_t>::max());
+ }
+
+ static constexpr TimeClass Min() {
+ return TimeClass(std::numeric_limits<int64_t>::min());
+ }
+
+ // For serializing only. Use FromInternalValue() to reconstitute. Please don't
+ // use this and do arithmetic on it, as it is more error prone than using the
+ // provided operators.
+ //
+ // DEPRECATED - Do not use in new code. For serializing Time values, prefer
+ // Time::ToDeltaSinceWindowsEpoch().InMicroseconds(). http://crbug.com/634507
+ constexpr int64_t ToInternalValue() const { return us_; }
+
+ // The amount of time since the origin (or "zero") point. This is a syntactic
+ // convenience to aid in code readability, mainly for debugging/testing use
+ // cases.
+ //
+ // Warning: While the Time subclass has a fixed origin point, the origin for
+ // the other subclasses can vary each time the application is restarted.
+ constexpr TimeDelta since_origin() const {
+ return TimeDelta::FromMicroseconds(us_);
+ }
+
+ constexpr TimeClass& operator=(TimeClass other) {
+ us_ = other.us_;
+ return *(static_cast<TimeClass*>(this));
+ }
+
+ // Compute the difference between two times.
+ constexpr TimeDelta operator-(TimeClass other) const {
+ return TimeDelta::FromMicroseconds(us_ - other.us_);
+ }
+
+ // Return a new time modified by some delta.
+ constexpr TimeClass operator+(TimeDelta delta) const {
+ return TimeClass(time_internal::SaturatedAdd(us_, delta));
+ }
+ constexpr TimeClass operator-(TimeDelta delta) const {
+ return TimeClass(time_internal::SaturatedSub(us_, delta));
+ }
+
+ // Modify by some time delta.
+ constexpr TimeClass& operator+=(TimeDelta delta) {
+ return static_cast<TimeClass&>(*this = (*this + delta));
+ }
+ constexpr TimeClass& operator-=(TimeDelta delta) {
+ return static_cast<TimeClass&>(*this = (*this - delta));
+ }
+
+ // Comparison operators
+ constexpr bool operator==(TimeClass other) const { return us_ == other.us_; }
+ constexpr bool operator!=(TimeClass other) const { return us_ != other.us_; }
+ constexpr bool operator<(TimeClass other) const { return us_ < other.us_; }
+ constexpr bool operator<=(TimeClass other) const { return us_ <= other.us_; }
+ constexpr bool operator>(TimeClass other) const { return us_ > other.us_; }
+ constexpr bool operator>=(TimeClass other) const { return us_ >= other.us_; }
+
+ protected:
+ constexpr explicit TimeBase(int64_t us) : us_(us) {}
+
+ // Time value in a microsecond timebase.
+ int64_t us_;
+};
+
+} // namespace time_internal
+
+template <class TimeClass>
+inline constexpr TimeClass operator+(TimeDelta delta, TimeClass t) {
+ return t + delta;
+}
+
+// Time -----------------------------------------------------------------------
+
+// Represents a wall clock time in UTC. Values are not guaranteed to be
+// monotonically non-decreasing and are subject to large amounts of skew.
+// Time is stored internally as microseconds since the Windows epoch (1601).
+class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
+ public:
+ // Offset of UNIX epoch (1970-01-01 00:00:00 UTC) from Windows FILETIME epoch
+ // (1601-01-01 00:00:00 UTC), in microseconds. This value is derived from the
+ // following: ((1970-1601)*365+89)*24*60*60*1000*1000, where 89 is the number
+ // of leap year days between 1601 and 1970: (1970-1601)/4 excluding 1700,
+ // 1800, and 1900.
+ static constexpr int64_t kTimeTToMicrosecondsOffset =
+ INT64_C(11644473600000000);
+
+#if defined(OS_WIN)
+ // To avoid overflow in QPC to Microseconds calculations, since we multiply
+ // by kMicrosecondsPerSecond, then the QPC value should not exceed
+ // (2^63 - 1) / 1E6. If it exceeds that threshold, we divide then multiply.
+ static constexpr int64_t kQPCOverflowThreshold = INT64_C(0x8637BD05AF7);
+#endif
+
+// kExplodedMinYear and kExplodedMaxYear define the platform-specific limits
+// for values passed to FromUTCExploded() and FromLocalExploded(). Those
+// functions will return false if passed values outside these limits. The limits
+// are inclusive, meaning that the API should support all dates within a given
+// limit year.
+#if defined(OS_WIN)
+ static constexpr int kExplodedMinYear = 1601;
+ static constexpr int kExplodedMaxYear = 30827;
+#elif defined(OS_IOS) && !__LP64__
+ static constexpr int kExplodedMinYear = std::numeric_limits<int>::min();
+ static constexpr int kExplodedMaxYear = std::numeric_limits<int>::max();
+#elif defined(OS_MACOSX)
+ static constexpr int kExplodedMinYear = 1902;
+ static constexpr int kExplodedMaxYear = std::numeric_limits<int>::max();
+#elif defined(OS_ANDROID)
+ // Though we use 64-bit time APIs on both 32 and 64 bit Android, some OS
+ // versions like KitKat (ARM but not x86 emulator) can't handle some early
+ // dates (e.g. before 1170). So we set min conservatively here.
+ static constexpr int kExplodedMinYear = 1902;
+ static constexpr int kExplodedMaxYear = std::numeric_limits<int>::max();
+#else
+ static constexpr int kExplodedMinYear =
+ (sizeof(time_t) == 4 ? 1902 : std::numeric_limits<int>::min());
+ static constexpr int kExplodedMaxYear =
+ (sizeof(time_t) == 4 ? 2037 : std::numeric_limits<int>::max());
+#endif
+
+ // Represents an exploded time that can be formatted nicely. This is kind of
+ // like the Win32 SYSTEMTIME structure or the Unix "struct tm" with a few
+ // additions and changes to prevent errors.
+ struct BASE_EXPORT Exploded {
+ int year; // Four digit year "2007"
+ int month; // 1-based month (values 1 = January, etc.)
+ int day_of_week; // 0-based day of week (0 = Sunday, etc.)
+ int day_of_month; // 1-based day of month (1-31)
+ int hour; // Hour within the current day (0-23)
+ int minute; // Minute within the current hour (0-59)
+ int second; // Second within the current minute (0-59 plus leap
+ // seconds which may take it up to 60).
+ int millisecond; // Milliseconds within the current second (0-999)
+
+ // A cursory test for whether the data members are within their
+ // respective ranges. A 'true' return value does not guarantee the
+ // Exploded value can be successfully converted to a Time value.
+ bool HasValidValues() const;
+ };
+
+ // Contains the NULL time. Use Time::Now() to get the current time.
+ constexpr Time() : TimeBase(0) {}
+
+ // Returns the time for epoch in Unix-like system (Jan 1, 1970).
+ static Time UnixEpoch();
+
+ // Returns the current time. Watch out, the system might adjust its clock
+ // in which case time will actually go backwards. We don't guarantee that
+ // times are increasing, or that two calls to Now() won't be the same.
+ static Time Now();
+
+ // Returns the current time. Same as Now() except that this function always
+ // uses system time so that there are no discrepancies between the returned
+ // time and system time even on virtual environments including our test bot.
+ // For timing sensitive unittests, this function should be used.
+ static Time NowFromSystemTime();
+
+ // Converts to/from TimeDeltas relative to the Windows epoch (1601-01-01
+ // 00:00:00 UTC). Prefer these methods for opaque serialization and
+ // deserialization of time values, e.g.
+ //
+ // // Serialization:
+ // base::Time last_updated = ...;
+ // SaveToDatabase(last_updated.ToDeltaSinceWindowsEpoch().InMicroseconds());
+ //
+ // // Deserialization:
+ // base::Time last_updated = base::Time::FromDeltaSinceWindowsEpoch(
+ // base::TimeDelta::FromMicroseconds(LoadFromDatabase()));
+ static Time FromDeltaSinceWindowsEpoch(TimeDelta delta);
+ TimeDelta ToDeltaSinceWindowsEpoch() const;
+
+ // Converts to/from time_t in UTC and a Time class.
+ static Time FromTimeT(time_t tt);
+ time_t ToTimeT() const;
+
+ // Converts time to/from a double which is the number of seconds since epoch
+ // (Jan 1, 1970). Webkit uses this format to represent time.
+ // Because WebKit initializes double time value to 0 to indicate "not
+ // initialized", we map it to empty Time object that also means "not
+ // initialized".
+ static Time FromDoubleT(double dt);
+ double ToDoubleT() const;
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+ // Converts the timespec structure to time. MacOS X 10.8.3 (and tentatively,
+ // earlier versions) will have the |ts|'s tv_nsec component zeroed out,
+ // having a 1 second resolution, which agrees with
+ // https://developer.apple.com/legacy/library/#technotes/tn/tn1150.html#HFSPlusDates.
+ static Time FromTimeSpec(const timespec& ts);
+#endif
+
+ // Converts to/from the Javascript convention for times, a number of
+ // milliseconds since the epoch:
+ // https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Date/getTime.
+ //
+ // Don't use ToJsTime() in new code, since it contains a subtle hack (only
+ // exactly 1601-01-01 00:00 UTC is represented as 1970-01-01 00:00 UTC), and
+ // that is not appropriate for general use. Try to use ToJsTimeIgnoringNull()
+ // unless you have a very good reason to use ToJsTime().
+ static Time FromJsTime(double ms_since_epoch);
+ double ToJsTime() const;
+ double ToJsTimeIgnoringNull() const;
+
+ // Converts to/from Java convention for times, a number of milliseconds since
+ // the epoch. Because the Java format has less resolution, converting to Java
+ // time is a lossy operation.
+ static Time FromJavaTime(int64_t ms_since_epoch);
+ int64_t ToJavaTime() const;
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+ static Time FromTimeVal(struct timeval t);
+ struct timeval ToTimeVal() const;
+#endif
+
+#if defined(OS_FUCHSIA)
+ static Time FromZxTime(zx_time_t time);
+ zx_time_t ToZxTime() const;
+#endif
+
+#if defined(OS_MACOSX)
+ static Time FromCFAbsoluteTime(CFAbsoluteTime t);
+ CFAbsoluteTime ToCFAbsoluteTime() const;
+#endif
+
+#if defined(OS_WIN)
+ static Time FromFileTime(FILETIME ft);
+ FILETIME ToFileTime() const;
+
+ // The minimum time of a low resolution timer. This is basically a windows
+ // constant of ~15.6ms. While it does vary on some older OS versions, we'll
+ // treat it as static across all windows versions.
+ static const int kMinLowResolutionThresholdMs = 16;
+
+ // Enable or disable Windows high resolution timer.
+ static void EnableHighResolutionTimer(bool enable);
+
+ // Read the minimum timer interval from the feature list. This should be
+ // called once after the feature list is initialized. This is needed for
+ // an experiment - see https://crbug.com/927165
+ static void ReadMinTimerIntervalLowResMs();
+
+ // Activates or deactivates the high resolution timer based on the |activate|
+ // flag. If the HighResolutionTimer is not Enabled (see
+ // EnableHighResolutionTimer), this function will return false. Otherwise
+ // returns true. Each successful activate call must be paired with a
+ // subsequent deactivate call.
+ // All callers to activate the high resolution timer must eventually call
+ // this function to deactivate the high resolution timer.
+ static bool ActivateHighResolutionTimer(bool activate);
+
+ // Returns true if the high resolution timer is both enabled and activated.
+ // This is provided for testing only, and is not tracked in a thread-safe
+ // way.
+ static bool IsHighResolutionTimerInUse();
+
+ // The following two functions are used to report the fraction of elapsed time
+ // that the high resolution timer is activated.
+ // ResetHighResolutionTimerUsage() resets the cumulative usage and starts the
+ // measurement interval and GetHighResolutionTimerUsage() returns the
+ // percentage of time since the reset that the high resolution timer was
+ // activated.
+ // ResetHighResolutionTimerUsage() must be called at least once before calling
+ // GetHighResolutionTimerUsage(); otherwise the usage result would be
+ // undefined.
+ static void ResetHighResolutionTimerUsage();
+ static double GetHighResolutionTimerUsage();
+#endif // defined(OS_WIN)
+
+ // Converts an exploded structure representing either the local time or UTC
+ // into a Time class. Returns false on a failure when, for example, a day of
+ // month is set to 31 on a 28-30 day month. Returns Time(0) on overflow.
+ static bool FromUTCExploded(const Exploded& exploded,
+ Time* time) WARN_UNUSED_RESULT {
+ return FromExploded(false, exploded, time);
+ }
+ static bool FromLocalExploded(const Exploded& exploded,
+ Time* time) WARN_UNUSED_RESULT {
+ return FromExploded(true, exploded, time);
+ }
+
+ // Converts a string representation of time to a Time object.
+  // An example of a time string which is converted is as below:
+ // "Tue, 15 Nov 1994 12:45:26 GMT". If the timezone is not specified
+ // in the input string, FromString assumes local time and FromUTCString
+ // assumes UTC. A timezone that cannot be parsed (e.g. "UTC" which is not
+ // specified in RFC822) is treated as if the timezone is not specified.
+ //
+ // WARNING: the underlying converter is very permissive. For example: it is
+ // not checked whether a given day of the week matches the date; Feb 29
+ // silently becomes Mar 1 in non-leap years; under certain conditions, whole
+ // English sentences may be parsed successfully and yield unexpected results.
+ //
+ // TODO(iyengar) Move the FromString/FromTimeT/ToTimeT/FromFileTime to
+ // a new time converter class.
+ static bool FromString(const char* time_string,
+ Time* parsed_time) WARN_UNUSED_RESULT {
+ return FromStringInternal(time_string, true, parsed_time);
+ }
+ static bool FromUTCString(const char* time_string,
+ Time* parsed_time) WARN_UNUSED_RESULT {
+ return FromStringInternal(time_string, false, parsed_time);
+ }
+
+ // Fills the given exploded structure with either the local time or UTC from
+ // this time structure (containing UTC).
+ void UTCExplode(Exploded* exploded) const {
+ return Explode(false, exploded);
+ }
+ void LocalExplode(Exploded* exploded) const {
+ return Explode(true, exploded);
+ }
+
+ // The following two functions round down the time to the nearest day in
+ // either UTC or local time. It will represent midnight on that day.
+ Time UTCMidnight() const { return Midnight(false); }
+ Time LocalMidnight() const { return Midnight(true); }
+
+ // Converts an integer value representing Time to a class. This may be used
+ // when deserializing a |Time| structure, using a value known to be
+ // compatible. It is not provided as a constructor because the integer type
+ // may be unclear from the perspective of a caller.
+ //
+ // DEPRECATED - Do not use in new code. For deserializing Time values, prefer
+ // Time::FromDeltaSinceWindowsEpoch(). http://crbug.com/634507
+ static constexpr Time FromInternalValue(int64_t us) { return Time(us); }
+
+ private:
+ friend class time_internal::TimeBase<Time>;
+
+ constexpr explicit Time(int64_t microseconds_since_win_epoch)
+ : TimeBase(microseconds_since_win_epoch) {}
+
+ // Explodes the given time to either local time |is_local = true| or UTC
+ // |is_local = false|.
+ void Explode(bool is_local, Exploded* exploded) const;
+
+ // Unexplodes a given time assuming the source is either local time
+ // |is_local = true| or UTC |is_local = false|. Function returns false on
+ // failure and sets |time| to Time(0). Otherwise returns true and sets |time|
+ // to non-exploded time.
+ static bool FromExploded(bool is_local,
+ const Exploded& exploded,
+ Time* time) WARN_UNUSED_RESULT;
+
+ // Rounds down the time to the nearest day in either local time
+ // |is_local = true| or UTC |is_local = false|.
+ Time Midnight(bool is_local) const;
+
+ // Converts a string representation of time to a Time object.
+  // An example of a time string which is converted is as below:
+ // "Tue, 15 Nov 1994 12:45:26 GMT". If the timezone is not specified
+ // in the input string, local time |is_local = true| or
+ // UTC |is_local = false| is assumed. A timezone that cannot be parsed
+ // (e.g. "UTC" which is not specified in RFC822) is treated as if the
+ // timezone is not specified.
+ static bool FromStringInternal(const char* time_string,
+ bool is_local,
+ Time* parsed_time) WARN_UNUSED_RESULT;
+
+ // Comparison does not consider |day_of_week| when doing the operation.
+ static bool ExplodedMostlyEquals(const Exploded& lhs,
+ const Exploded& rhs) WARN_UNUSED_RESULT;
+
+ // Converts the provided time in milliseconds since the Unix epoch (1970) to a
+ // Time object, avoiding overflows.
+ static bool FromMillisecondsSinceUnixEpoch(int64_t unix_milliseconds,
+ Time* time) WARN_UNUSED_RESULT;
+
+ // Returns the milliseconds since the Unix epoch (1970), rounding the
+ // microseconds towards -infinity.
+ int64_t ToRoundedDownMillisecondsSinceUnixEpoch() const;
+};
+
+// static
+constexpr TimeDelta TimeDelta::FromDays(int days) {
+ return days == std::numeric_limits<int>::max()
+ ? Max()
+ : TimeDelta(days * Time::kMicrosecondsPerDay);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromHours(int hours) {
+ return hours == std::numeric_limits<int>::max()
+ ? Max()
+ : TimeDelta(hours * Time::kMicrosecondsPerHour);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMinutes(int minutes) {
+ return minutes == std::numeric_limits<int>::max()
+ ? Max()
+ : TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromSeconds(int64_t secs) {
+ return FromProduct(secs, Time::kMicrosecondsPerSecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMilliseconds(int64_t ms) {
+ return FromProduct(ms, Time::kMicrosecondsPerMillisecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMicroseconds(int64_t us) {
+ return TimeDelta(us);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromNanoseconds(int64_t ns) {
+ return TimeDelta(ns / Time::kNanosecondsPerMicrosecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromSecondsD(double secs) {
+ return FromDouble(secs * Time::kMicrosecondsPerSecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMillisecondsD(double ms) {
+ return FromDouble(ms * Time::kMicrosecondsPerMillisecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMicrosecondsD(double us) {
+ return FromDouble(us);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromNanosecondsD(double ns) {
+ return FromDouble(ns / Time::kNanosecondsPerMicrosecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::Max() {
+ return TimeDelta(std::numeric_limits<int64_t>::max());
+}
+
+// static
+constexpr TimeDelta TimeDelta::Min() {
+ return TimeDelta(std::numeric_limits<int64_t>::min());
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromDouble(double value) {
+ return TimeDelta(saturated_cast<int64_t>(value));
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromProduct(int64_t value,
+ int64_t positive_value) {
+ DCHECK(positive_value > 0); // NOLINT, DCHECK_GT isn't constexpr.
+ return value > std::numeric_limits<int64_t>::max() / positive_value
+ ? Max()
+ : value < std::numeric_limits<int64_t>::min() / positive_value
+ ? Min()
+ : TimeDelta(value * positive_value);
+}
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, Time time);
+
+// TimeTicks ------------------------------------------------------------------
+
+// Represents monotonically non-decreasing clock time.
+class BASE_EXPORT TimeTicks : public time_internal::TimeBase<TimeTicks> {
+ public:
+ // The underlying clock used to generate new TimeTicks.
+ enum class Clock {
+ FUCHSIA_ZX_CLOCK_MONOTONIC,
+ LINUX_CLOCK_MONOTONIC,
+ IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME,
+ MAC_MACH_ABSOLUTE_TIME,
+ WIN_QPC,
+ WIN_ROLLOVER_PROTECTED_TIME_GET_TIME
+ };
+
+ constexpr TimeTicks() : TimeBase(0) {}
+
+ // Platform-dependent tick count representing "right now." When
+ // IsHighResolution() returns false, the resolution of the clock could be
+ // as coarse as ~15.6ms. Otherwise, the resolution should be no worse than one
+ // microsecond.
+ static TimeTicks Now();
+
+ // Returns true if the high resolution clock is working on this system and
+ // Now() will return high resolution values. Note that, on systems where the
+ // high resolution clock works but is deemed inefficient, the low resolution
+ // clock will be used instead.
+ static bool IsHighResolution() WARN_UNUSED_RESULT;
+
+ // Returns true if TimeTicks is consistent across processes, meaning that
+ // timestamps taken on different processes can be safely compared with one
+ // another. (Note that, even on platforms where this returns true, time values
+ // from different threads that are within one tick of each other must be
+ // considered to have an ambiguous ordering.)
+ static bool IsConsistentAcrossProcesses() WARN_UNUSED_RESULT;
+
+#if defined(OS_FUCHSIA)
+ // Converts between TimeTicks and an ZX_CLOCK_MONOTONIC zx_time_t value.
+ static TimeTicks FromZxTime(zx_time_t nanos_since_boot);
+ zx_time_t ToZxTime() const;
+#endif
+
+#if defined(OS_WIN)
+ // Translates an absolute QPC timestamp into a TimeTicks value. The returned
+ // value has the same origin as Now(). Do NOT attempt to use this if
+ // IsHighResolution() returns false.
+ static TimeTicks FromQPCValue(LONGLONG qpc_value);
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time);
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
+#if defined(OS_ANDROID) || defined(OS_CHROMEOS)
+ // Converts to TimeTicks the value obtained from SystemClock.uptimeMillis().
+  // Note: this conversion may be non-monotonic in relation to previously
+ // obtained TimeTicks::Now() values because of the truncation (to
+ // milliseconds) performed by uptimeMillis().
+ static TimeTicks FromUptimeMillis(int64_t uptime_millis_value);
+#endif
+
+ // Get an estimate of the TimeTick value at the time of the UnixEpoch. Because
+ // Time and TimeTicks respond differently to user-set time and NTP
+ // adjustments, this number is only an estimate. Nevertheless, this can be
+ // useful when you need to relate the value of TimeTicks to a real time and
+ // date. Note: Upon first invocation, this function takes a snapshot of the
+ // realtime clock to establish a reference point. This function will return
+ // the same value for the duration of the application, but will be different
+ // in future application runs.
+ static TimeTicks UnixEpoch();
+
+ // Returns |this| snapped to the next tick, given a |tick_phase| and
+ // repeating |tick_interval| in both directions. |this| may be before,
+ // after, or equal to the |tick_phase|.
+ TimeTicks SnappedToNextTick(TimeTicks tick_phase,
+ TimeDelta tick_interval) const;
+
+ // Returns an enum indicating the underlying clock being used to generate
+ // TimeTicks timestamps. This function should only be used for debugging and
+ // logging purposes.
+ static Clock GetClock();
+
+ // Converts an integer value representing TimeTicks to a class. This may be
+ // used when deserializing a |TimeTicks| structure, using a value known to be
+ // compatible. It is not provided as a constructor because the integer type
+ // may be unclear from the perspective of a caller.
+ //
+ // DEPRECATED - Do not use in new code. For deserializing TimeTicks values,
+ // prefer TimeTicks + TimeDelta(). http://crbug.com/634507
+ static constexpr TimeTicks FromInternalValue(int64_t us) {
+ return TimeTicks(us);
+ }
+
+ protected:
+#if defined(OS_WIN)
+ typedef DWORD (*TickFunctionType)(void);
+ static TickFunctionType SetMockTickFunction(TickFunctionType ticker);
+#endif
+
+ private:
+ friend class time_internal::TimeBase<TimeTicks>;
+
+ // Please use Now() to create a new object. This is for internal use
+ // and testing.
+ constexpr explicit TimeTicks(int64_t us) : TimeBase(us) {}
+};
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, TimeTicks time_ticks);
+
+// ThreadTicks ----------------------------------------------------------------
+
+// Represents a clock, specific to a particular thread, that runs only while the
+// thread is running.
+class BASE_EXPORT ThreadTicks : public time_internal::TimeBase<ThreadTicks> {
+ public:
+ constexpr ThreadTicks() : TimeBase(0) {}
+
+ // Returns true if ThreadTicks::Now() is supported on this system.
+ static bool IsSupported() WARN_UNUSED_RESULT {
+#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+ (defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_ANDROID) || \
+ defined(OS_FUCHSIA)
+ return true;
+#elif defined(OS_WIN)
+ return IsSupportedWin();
+#else
+ return false;
+#endif
+ }
+
+ // Waits until the initialization is completed. Needs to be guarded with a
+ // call to IsSupported().
+ static void WaitUntilInitialized() {
+#if defined(OS_WIN)
+ WaitUntilInitializedWin();
+#endif
+ }
+
+ // Returns thread-specific CPU-time on systems that support this feature.
+ // Needs to be guarded with a call to IsSupported(). Use this timer
+ // to (approximately) measure how much time the calling thread spent doing
+ // actual work vs. being de-scheduled. May return bogus results if the thread
+ // migrates to another CPU between two calls. Returns an empty ThreadTicks
+ // object until the initialization is completed. If a clock reading is
+ // absolutely needed, call WaitUntilInitialized() before this method.
+ static ThreadTicks Now();
+
+#if defined(OS_WIN)
+ // Similar to Now() above except this returns thread-specific CPU time for an
+  // arbitrary thread. All comments for the Now() method above apply to this
+ // method as well.
+ static ThreadTicks GetForThread(const PlatformThreadHandle& thread_handle);
+#endif
+
+ // Converts an integer value representing ThreadTicks to a class. This may be
+ // used when deserializing a |ThreadTicks| structure, using a value known to
+ // be compatible. It is not provided as a constructor because the integer type
+ // may be unclear from the perspective of a caller.
+ //
+ // DEPRECATED - Do not use in new code. For deserializing ThreadTicks values,
+ // prefer ThreadTicks + TimeDelta(). http://crbug.com/634507
+ static constexpr ThreadTicks FromInternalValue(int64_t us) {
+ return ThreadTicks(us);
+ }
+
+ private:
+ friend class time_internal::TimeBase<ThreadTicks>;
+
+ // Please use Now() or GetForThread() to create a new object. This is for
+ // internal use and testing.
+ constexpr explicit ThreadTicks(int64_t us) : TimeBase(us) {}
+
+#if defined(OS_WIN)
+ FRIEND_TEST_ALL_PREFIXES(TimeTicks, TSCTicksPerSecond);
+
+#if defined(ARCH_CPU_ARM64)
+ // TSCTicksPerSecond is not supported on Windows on Arm systems because the
+ // cycle-counting methods use the actual CPU cycle count, and not a consistent
+ // incrementing counter.
+#else
+ // Returns the frequency of the TSC in ticks per second, or 0 if it hasn't
+ // been measured yet. Needs to be guarded with a call to IsSupported().
+ // This method is declared here rather than in the anonymous namespace to
+ // allow testing.
+ static double TSCTicksPerSecond();
+#endif
+
+ static bool IsSupportedWin() WARN_UNUSED_RESULT;
+ static void WaitUntilInitializedWin();
+#endif
+};
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, ThreadTicks time_ticks);
+
+} // namespace base
+
+#endif // BASE_TIME_TIME_H_
diff --git a/security/sandbox/chromium/base/time/time_exploded_posix.cc b/security/sandbox/chromium/base/time/time_exploded_posix.cc
new file mode 100644
index 0000000000..ca8a7880bf
--- /dev/null
+++ b/security/sandbox/chromium/base/time/time_exploded_posix.cc
@@ -0,0 +1,287 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <time.h>
+#if defined(OS_ANDROID) && !defined(__LP64__)
+#include <time64.h>
+#endif
+#include <unistd.h>
+
+#include <limits>
+
+#include "base/numerics/safe_math.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#elif defined(OS_NACL)
+#include "base/os_compat_nacl.h"
+#endif
+
+#if defined(OS_MACOSX) || defined(OS_IOS)
+static_assert(sizeof(time_t) >= 8, "Y2038 problem!");
+#endif
+
+namespace {
+
+// This prevents a crash on traversing the environment global and looking up
+// the 'TZ' variable in libc. See: crbug.com/390567.
+base::Lock* GetSysTimeToTimeStructLock() {
+ static auto* lock = new base::Lock();
+ return lock;
+}
+
+// Define a system-specific SysTime that wraps either a time_t or
+// a time64_t depending on the host system, and the associated conversions.
+// See crbug.com/162007
+#if defined(OS_ANDROID) && !defined(__LP64__)
+typedef time64_t SysTime;
+
+SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
+ base::AutoLock locked(*GetSysTimeToTimeStructLock());
+ if (is_local)
+ return mktime64(timestruct);
+ else
+ return timegm64(timestruct);
+}
+
+void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
+ base::AutoLock locked(*GetSysTimeToTimeStructLock());
+ if (is_local)
+ localtime64_r(&t, timestruct);
+ else
+ gmtime64_r(&t, timestruct);
+}
+#elif defined(OS_AIX)
+// The function timegm is not available on AIX.
+time_t aix_timegm(struct tm* tm) {
+ time_t ret;
+ char* tz;
+
+ tz = getenv("TZ");
+ if (tz) {
+ tz = strdup(tz);
+ }
+ setenv("TZ", "GMT0", 1);
+ tzset();
+ ret = mktime(tm);
+ if (tz) {
+ setenv("TZ", tz, 1);
+ free(tz);
+ } else {
+ unsetenv("TZ");
+ }
+ tzset();
+ return ret;
+}
+
+typedef time_t SysTime;
+
+SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
+ base::AutoLock locked(*GetSysTimeToTimeStructLock());
+ if (is_local)
+ return mktime(timestruct);
+ else
+ return aix_timegm(timestruct);
+}
+
+void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
+ base::AutoLock locked(*GetSysTimeToTimeStructLock());
+ if (is_local)
+ localtime_r(&t, timestruct);
+ else
+ gmtime_r(&t, timestruct);
+}
+
+#else
+typedef time_t SysTime;
+
+SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
+ base::AutoLock locked(*GetSysTimeToTimeStructLock());
+ return is_local ? mktime(timestruct) : timegm(timestruct);
+}
+
+void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
+ base::AutoLock locked(*GetSysTimeToTimeStructLock());
+ if (is_local)
+ localtime_r(&t, timestruct);
+ else
+ gmtime_r(&t, timestruct);
+}
+#endif // defined(OS_ANDROID) && !defined(__LP64__)
+
+} // namespace
+
+namespace base {
+
+void Time::Explode(bool is_local, Exploded* exploded) const {
+ // The following values are all rounded towards -infinity.
+ int64_t milliseconds = ToRoundedDownMillisecondsSinceUnixEpoch();
+ SysTime seconds; // Seconds since epoch.
+ int millisecond; // Exploded millisecond value (0-999).
+
+ // If the microseconds were negative, the rounded down milliseconds will also
+ // be negative. For example, -1 us becomes -1 ms.
+ if (milliseconds >= 0) {
+ // Rounding towards -infinity <=> rounding towards 0, in this case.
+ seconds = milliseconds / kMillisecondsPerSecond;
+ millisecond = milliseconds % kMillisecondsPerSecond;
+ } else {
+ // Round these *down* (towards -infinity).
+ seconds =
+ (milliseconds - kMillisecondsPerSecond + 1) / kMillisecondsPerSecond;
+ // Make this nonnegative (and between 0 and 999 inclusive).
+ millisecond = milliseconds % kMillisecondsPerSecond;
+ if (millisecond < 0)
+ millisecond += kMillisecondsPerSecond;
+ }
+
+ struct tm timestruct;
+ SysTimeToTimeStruct(seconds, &timestruct, is_local);
+
+ exploded->year = timestruct.tm_year + 1900;
+ exploded->month = timestruct.tm_mon + 1;
+ exploded->day_of_week = timestruct.tm_wday;
+ exploded->day_of_month = timestruct.tm_mday;
+ exploded->hour = timestruct.tm_hour;
+ exploded->minute = timestruct.tm_min;
+ exploded->second = timestruct.tm_sec;
+ exploded->millisecond = millisecond;
+}
+
+// static
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
+ CheckedNumeric<int> month = exploded.month;
+ month--;
+ CheckedNumeric<int> year = exploded.year;
+ year -= 1900;
+ if (!month.IsValid() || !year.IsValid()) {
+ *time = Time(0);
+ return false;
+ }
+
+ struct tm timestruct;
+ timestruct.tm_sec = exploded.second;
+ timestruct.tm_min = exploded.minute;
+ timestruct.tm_hour = exploded.hour;
+ timestruct.tm_mday = exploded.day_of_month;
+ timestruct.tm_mon = month.ValueOrDie();
+ timestruct.tm_year = year.ValueOrDie();
+ timestruct.tm_wday = exploded.day_of_week; // mktime/timegm ignore this
+ timestruct.tm_yday = 0; // mktime/timegm ignore this
+ timestruct.tm_isdst = -1; // attempt to figure it out
+#if !defined(OS_NACL) && !defined(OS_SOLARIS) && !defined(OS_AIX)
+ timestruct.tm_gmtoff = 0; // not a POSIX field, so mktime/timegm ignore
+ timestruct.tm_zone = nullptr; // not a POSIX field, so mktime/timegm ignore
+#endif
+
+ SysTime seconds;
+
+ // Certain exploded dates do not really exist due to daylight saving times,
+ // and this causes mktime() to return implementation-defined values when
+ // tm_isdst is set to -1. On Android, the function will return -1, while the
+ // C libraries of other platforms typically return a liberally-chosen value.
+ // Handling this requires the special code below.
+
+ // SysTimeFromTimeStruct() modifies the input structure, save current value.
+ struct tm timestruct0 = timestruct;
+
+ seconds = SysTimeFromTimeStruct(&timestruct, is_local);
+ if (seconds == -1) {
+ // Get the time values with tm_isdst == 0 and 1, then select the closest one
+ // to UTC 00:00:00 that isn't -1.
+ timestruct = timestruct0;
+ timestruct.tm_isdst = 0;
+ int64_t seconds_isdst0 = SysTimeFromTimeStruct(&timestruct, is_local);
+
+ timestruct = timestruct0;
+ timestruct.tm_isdst = 1;
+ int64_t seconds_isdst1 = SysTimeFromTimeStruct(&timestruct, is_local);
+
+ // seconds_isdst0 or seconds_isdst1 can be -1 for some timezones.
+    // E.g. "CLST" (Chile Summer Time) returns -1 for 'tm_isdst == 1'.
+ if (seconds_isdst0 < 0)
+ seconds = seconds_isdst1;
+ else if (seconds_isdst1 < 0)
+ seconds = seconds_isdst0;
+ else
+ seconds = std::min(seconds_isdst0, seconds_isdst1);
+ }
+
+ // Handle overflow. Clamping the range to what mktime and timegm might
+ // return is the best that can be done here. It's not ideal, but it's better
+ // than failing here or ignoring the overflow case and treating each time
+ // overflow as one second prior to the epoch.
+ int64_t milliseconds = 0;
+ if (seconds == -1 && (exploded.year < 1969 || exploded.year > 1970)) {
+ // If exploded.year is 1969 or 1970, take -1 as correct, with the
+ // time indicating 1 second prior to the epoch. (1970 is allowed to handle
+ // time zone and DST offsets.) Otherwise, return the most future or past
+ // time representable. Assumes the time_t epoch is 1970-01-01 00:00:00 UTC.
+ //
+    // The minimum and maximum representable times that mktime and timegm could
+ // return are used here instead of values outside that range to allow for
+ // proper round-tripping between exploded and counter-type time
+ // representations in the presence of possible truncation to time_t by
+ // division and use with other functions that accept time_t.
+ //
+ // When representing the most distant time in the future, add in an extra
+ // 999ms to avoid the time being less than any other possible value that
+ // this function can return.
+
+ // On Android, SysTime is int64_t, special care must be taken to avoid
+ // overflows.
+ const int64_t min_seconds = (sizeof(SysTime) < sizeof(int64_t))
+ ? std::numeric_limits<SysTime>::min()
+ : std::numeric_limits<int32_t>::min();
+ const int64_t max_seconds = (sizeof(SysTime) < sizeof(int64_t))
+ ? std::numeric_limits<SysTime>::max()
+ : std::numeric_limits<int32_t>::max();
+ if (exploded.year < 1969) {
+ milliseconds = min_seconds * kMillisecondsPerSecond;
+ } else {
+ milliseconds = max_seconds * kMillisecondsPerSecond;
+ milliseconds += (kMillisecondsPerSecond - 1);
+ }
+ } else {
+ CheckedNumeric<int64_t> checked_millis = seconds;
+ checked_millis *= kMillisecondsPerSecond;
+ checked_millis += exploded.millisecond;
+ if (!checked_millis.IsValid()) {
+ *time = Time(0);
+ return false;
+ }
+ milliseconds = checked_millis.ValueOrDie();
+ }
+
+ Time converted_time;
+ if (!FromMillisecondsSinceUnixEpoch(milliseconds, &converted_time)) {
+ *time = base::Time(0);
+ return false;
+ }
+
+ // If |exploded.day_of_month| is set to 31 on a 28-30 day month, it will
+ // return the first day of the next month. Thus round-trip the time and
+ // compare the initial |exploded| with |utc_to_exploded| time.
+ Time::Exploded to_exploded;
+ if (!is_local)
+ converted_time.UTCExplode(&to_exploded);
+ else
+ converted_time.LocalExplode(&to_exploded);
+
+ if (ExplodedMostlyEquals(to_exploded, exploded)) {
+ *time = converted_time;
+ return true;
+ }
+
+ *time = Time(0);
+ return false;
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/time/time_now_posix.cc b/security/sandbox/chromium/base/time/time_now_posix.cc
new file mode 100644
index 0000000000..4ce93c0811
--- /dev/null
+++ b/security/sandbox/chromium/base/time/time_now_posix.cc
@@ -0,0 +1,122 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <time.h>
+#if defined(OS_ANDROID) && !defined(__LP64__)
+#include <time64.h>
+#endif
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/numerics/safe_math.h"
+#include "base/time/time_override.h"
+#include "build/build_config.h"
+
+// Ensure the Fuchsia and Mac builds do not include this module. Instead,
+// non-POSIX implementation is used for sampling the system clocks.
+#if defined(OS_FUCHSIA) || defined(OS_MACOSX)
+#error "This implementation is for POSIX platforms other than Fuchsia or Mac."
+#endif
+
+namespace {
+
+int64_t ConvertTimespecToMicros(const struct timespec& ts) {
+ // On 32-bit systems, the calculation cannot overflow int64_t.
+ // 2**32 * 1000000 + 2**64 / 1000 < 2**63
+ if (sizeof(ts.tv_sec) <= 4 && sizeof(ts.tv_nsec) <= 8) {
+ int64_t result = ts.tv_sec;
+ result *= base::Time::kMicrosecondsPerSecond;
+ result += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
+ return result;
+ }
+ base::CheckedNumeric<int64_t> result(ts.tv_sec);
+ result *= base::Time::kMicrosecondsPerSecond;
+ result += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
+ return result.ValueOrDie();
+}
+
+// Helper function to get results from clock_gettime() and convert to a
+// microsecond timebase. Minimum requirement is MONOTONIC_CLOCK to be supported
+// on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
+// _POSIX_MONOTONIC_CLOCK to -1.
+#if (defined(OS_POSIX) && defined(_POSIX_MONOTONIC_CLOCK) && \
+ _POSIX_MONOTONIC_CLOCK >= 0) || \
+ defined(OS_BSD) || defined(OS_ANDROID)
+int64_t ClockNow(clockid_t clk_id) {
+ struct timespec ts;
+ CHECK(clock_gettime(clk_id, &ts) == 0);
+ return ConvertTimespecToMicros(ts);
+}
+#else // _POSIX_MONOTONIC_CLOCK
+#error No usable tick clock function on this platform.
+#endif // _POSIX_MONOTONIC_CLOCK
+
+} // namespace
+
+namespace base {
+
+// Time -----------------------------------------------------------------------
+
+namespace subtle {
+Time TimeNowIgnoringOverride() {
+ struct timeval tv;
+ struct timezone tz = {0, 0}; // UTC
+ CHECK(gettimeofday(&tv, &tz) == 0);
+ // Combine seconds and microseconds in a 64-bit field containing microseconds
+ // since the epoch. That's enough for nearly 600 centuries. Adjust from
+ // Unix (1970) to Windows (1601) epoch.
+ return Time() + TimeDelta::FromMicroseconds(
+ (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec) +
+ Time::kTimeTToMicrosecondsOffset);
+}
+
+Time TimeNowFromSystemTimeIgnoringOverride() {
+ // Just use TimeNowIgnoringOverride() because it returns the system time.
+ return TimeNowIgnoringOverride();
+}
+} // namespace subtle
+
+// TimeTicks ------------------------------------------------------------------
+
+namespace subtle {
+TimeTicks TimeTicksNowIgnoringOverride() {
+ return TimeTicks() + TimeDelta::FromMicroseconds(ClockNow(CLOCK_MONOTONIC));
+}
+} // namespace subtle
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+ return Clock::LINUX_CLOCK_MONOTONIC;
+}
+
+// static
+bool TimeTicks::IsHighResolution() {
+ return true;
+}
+
+// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+ return true;
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+namespace subtle {
+ThreadTicks ThreadTicksNowIgnoringOverride() {
+#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+ defined(OS_ANDROID)
+ return ThreadTicks() +
+ TimeDelta::FromMicroseconds(ClockNow(CLOCK_THREAD_CPUTIME_ID));
+#else
+ NOTREACHED();
+ return ThreadTicks();
+#endif
+}
+} // namespace subtle
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/time/time_override.h b/security/sandbox/chromium/base/time/time_override.h
new file mode 100644
index 0000000000..ad3180c62a
--- /dev/null
+++ b/security/sandbox/chromium/base/time/time_override.h
@@ -0,0 +1,74 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_TIME_OVERRIDE_H_
+#define BASE_TIME_TIME_OVERRIDE_H_
+
+#include "base/base_export.h"
+#include "base/time/time.h"
+
+namespace base {
+
+using TimeNowFunction = decltype(&Time::Now);
+using TimeTicksNowFunction = decltype(&TimeTicks::Now);
+using ThreadTicksNowFunction = decltype(&ThreadTicks::Now);
+
+// Time overrides should be used with extreme caution. Discuss with //base/time
+// OWNERS before adding a new one.
+namespace subtle {
+
+// Override the return value of Time::Now and Time::NowFromSystemTime /
+// TimeTicks::Now / ThreadTicks::Now to emulate time, e.g. for tests or to
+// modify progression of time. Note that the override should be set while
+// single-threaded and before the first call to Now() to avoid threading issues
+// and inconsistencies in returned values. Nested overrides are not allowed.
+class BASE_EXPORT ScopedTimeClockOverrides {
+ public:
+  // Pass |nullptr| for any override if it shouldn't be overridden.
+ ScopedTimeClockOverrides(TimeNowFunction time_override,
+ TimeTicksNowFunction time_ticks_override,
+ ThreadTicksNowFunction thread_ticks_override);
+
+ // Restores the platform default Now() functions.
+ ~ScopedTimeClockOverrides();
+
+ static bool overrides_active() { return overrides_active_; }
+
+ private:
+ static bool overrides_active_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedTimeClockOverrides);
+};
+
+// These methods return the platform default Time::Now / TimeTicks::Now /
+// ThreadTicks::Now values even while an override is in place. These methods
+// should only be used in places where emulated time should be disregarded. For
+// example, they can be used to implement test timeouts for tests that may
+// override time.
+BASE_EXPORT Time TimeNowIgnoringOverride();
+BASE_EXPORT Time TimeNowFromSystemTimeIgnoringOverride();
+BASE_EXPORT TimeTicks TimeTicksNowIgnoringOverride();
+BASE_EXPORT ThreadTicks ThreadTicksNowIgnoringOverride();
+
+} // namespace subtle
+
+namespace internal {
+
+// These function pointers are used by platform-independent implementations of
+// the Now() methods and ScopedTimeClockOverrides. They are set to point to the
+// respective NowIgnoringOverride functions by default, but can also be set by
+// platform-specific code to select a default implementation at runtime, thereby
+// avoiding the indirection via the NowIgnoringOverride functions. Note that the
+// pointers can be overridden and later reset to the NowIgnoringOverride
+// functions by ScopedTimeClockOverrides.
+extern TimeNowFunction g_time_now_function;
+extern TimeNowFunction g_time_now_from_system_time_function;
+extern TimeTicksNowFunction g_time_ticks_now_function;
+extern ThreadTicksNowFunction g_thread_ticks_now_function;
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_TIME_TIME_OVERRIDE_H_
diff --git a/security/sandbox/chromium/base/time/time_win.cc b/security/sandbox/chromium/base/time/time_win.cc
new file mode 100644
index 0000000000..c1976e64a6
--- /dev/null
+++ b/security/sandbox/chromium/base/time/time_win.cc
@@ -0,0 +1,810 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Windows Timer Primer
+//
+// A good article: http://www.ddj.com/windows/184416651
+// A good mozilla bug: http://bugzilla.mozilla.org/show_bug.cgi?id=363258
+//
+// The default windows timer, GetSystemTimeAsFileTime is not very precise.
+// It is only good to ~15.5ms.
+//
+// QueryPerformanceCounter is the logical choice for a high-precision timer.
+// However, it is known to be buggy on some hardware. Specifically, it can
+// sometimes "jump". On laptops, QPC can also be very expensive to call.
+// It's 3-4x slower than timeGetTime() on desktops, but can be 10x slower
+// on laptops. A unittest exists which will show the relative cost of various
+// timers on any system.
+//
+// The next logical choice is timeGetTime(). timeGetTime has a precision of
+// 1ms, but only if you call APIs (timeBeginPeriod()) which affect all other
+// applications on the system. By default, precision is only 15.5ms.
+// Unfortunately, we don't want to call timeBeginPeriod because we don't
+// want to affect other applications. Further, on mobile platforms, use of
+// faster multimedia timers can hurt battery life. See the intel
+// article about this here:
+// http://softwarecommunity.intel.com/articles/eng/1086.htm
+//
+// To work around all this, we're going to generally use timeGetTime(). We
+// will only increase the system-wide timer if we're not running on battery
+// power.
+
+#include "base/time/time.h"
+
+#include <windows.foundation.h>
+#include <windows.h>
+#include <mmsystem.h>
+#include <stdint.h>
+
+#include "base/atomicops.h"
+#include "base/bit_cast.h"
+#include "base/cpu.h"
+#include "base/feature_list.h"
+#include "base/logging.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time_override.h"
+#include "base/time/time_win_features.h"
+
+namespace base {
+
+namespace {
+
+// From MSDN, FILETIME "Contains a 64-bit value representing the number of
+// 100-nanosecond intervals since January 1, 1601 (UTC)."
+// Converts a FILETIME to microseconds since the Windows epoch (1601).
+int64_t FileTimeToMicroseconds(const FILETIME& ft) {
+  // Need to bit_cast to fix alignment, then divide by 10 to convert
+  // 100-nanoseconds to microseconds. This only works on little-endian
+  // machines.
+  return bit_cast<int64_t, FILETIME>(ft) / 10;
+}
+
+// Inverse of FileTimeToMicroseconds(): converts non-negative microseconds
+// since the Windows epoch (1601) back to a FILETIME.
+void MicrosecondsToFileTime(int64_t us, FILETIME* ft) {
+  DCHECK_GE(us, 0LL) << "Time is less than 0, negative values are not "
+                        "representable in FILETIME";
+
+  // Multiply by 10 to convert microseconds to 100-nanoseconds. Bit_cast will
+  // handle alignment problems. This only works on little-endian machines.
+  *ft = bit_cast<FILETIME, int64_t>(us * 10);
+}
+
+// Returns the current wall-clock time in microseconds since the Windows
+// epoch, read from the low-granularity (~15.5 ms, per the header comment)
+// system clock.
+int64_t CurrentWallclockMicroseconds() {
+  FILETIME ft;
+  ::GetSystemTimeAsFileTime(&ft);
+  return FileTimeToMicroseconds(ft);
+}
+
+// Time between resampling the un-granular clock for this API.
+constexpr TimeDelta kMaxTimeToAvoidDrift = TimeDelta::FromSeconds(60);
+
+int64_t g_initial_time = 0;
+TimeTicks g_initial_ticks;
+
+// (Re)captures a matched pair of tick-clock and wall-clock readings. These
+// two globals let TimeNowIgnoringOverride() derive a finer-grained wall-clock
+// time than CurrentWallclockMicroseconds() alone provides.
+void InitializeClock() {
+  g_initial_ticks = subtle::TimeTicksNowIgnoringOverride();
+  g_initial_time = CurrentWallclockMicroseconds();
+}
+
+// Interval to use when on DC power.
+UINT g_battery_power_interval_ms = 4;
+// Track the last value passed to timeBeginPeriod so that we can cancel that
+// call by calling timeEndPeriod with the same value. A value of zero means that
+// the timer frequency is not currently raised.
+UINT g_last_interval_requested_ms = 0;
+// Track if MinTimerIntervalHighResMs() or MinTimerIntervalLowResMs() is active.
+// For most purposes this could also be named g_is_on_ac_power.
+bool g_high_res_timer_enabled = false;
+// How many times the high resolution timer has been called.
+uint32_t g_high_res_timer_count = 0;
+// Start time of the high resolution timer usage monitoring. This is needed
+// to calculate the usage as percentage of the total elapsed time.
+TimeTicks g_high_res_timer_usage_start;
+// The cumulative time the high resolution timer has been in use since
+// |g_high_res_timer_usage_start| moment.
+TimeDelta g_high_res_timer_usage;
+// Timestamp of the last activation change of the high resolution timer. This
+// is used to calculate the cumulative usage.
+TimeTicks g_high_res_timer_last_activation;
+// The lock to control access to the above set of variables. Lazily created
+// and intentionally leaked (never deleted, so no static destructor at exit).
+Lock* GetHighResLock() {
+  static auto* lock = new Lock();
+  return lock;
+}
+
+// The two values that ActivateHighResolutionTimer uses to set the systemwide
+// timer interrupt frequency on Windows. These control how precise timers are
+// but also have a big impact on battery life.
+
+// Used when a faster timer has been requested (g_high_res_timer_count > 0) and
+// the computer is running on AC power (plugged in) so that it's okay to go to
+// the highest frequency.
+UINT MinTimerIntervalHighResMs() {
+  return 1;  // 1 ms is the highest timer interrupt frequency requested here.
+}
+
+// Used when a faster timer has been requested (g_high_res_timer_count > 0) and
+// the computer is running on DC power (battery) so that we don't want to raise
+// the timer frequency as much.
+UINT MinTimerIntervalLowResMs() {
+  // Either 4 or 8 ms; selected in Time::ReadMinTimerIntervalLowResMs().
+  return g_battery_power_interval_ms;
+}
+
+// Calculate the desired timer interrupt interval. Note that zero means that the
+// system default should be used.
+UINT GetIntervalMs() {
+  if (!g_high_res_timer_count)
+    return 0;  // Use the default, typically 15.625 ms.
+  if (g_high_res_timer_enabled)
+    return MinTimerIntervalHighResMs();
+  return MinTimerIntervalLowResMs();
+}
+
+// Compare the currently requested timer interrupt interval to the last interval
+// requested and update if necessary (by cancelling the old request and making a
+// new request). If there is no change then do nothing. Callers must hold
+// GetHighResLock() (hence the "Locked" suffix).
+void UpdateTimerIntervalLocked() {
+  UINT new_interval = GetIntervalMs();
+  if (new_interval == g_last_interval_requested_ms)
+    return;
+  if (g_last_interval_requested_ms) {
+    // Record how long the timer interrupt frequency was raised.
+    g_high_res_timer_usage += subtle::TimeTicksNowIgnoringOverride() -
+                              g_high_res_timer_last_activation;
+    // Reset the timer interrupt back to the default.
+    timeEndPeriod(g_last_interval_requested_ms);
+  }
+  g_last_interval_requested_ms = new_interval;
+  if (g_last_interval_requested_ms) {
+    // Record when the timer interrupt was raised.
+    g_high_res_timer_last_activation = subtle::TimeTicksNowIgnoringOverride();
+    timeBeginPeriod(g_last_interval_requested_ms);
+  }
+}
+
+// Returns the current value of the performance counter. The value is in raw
+// QPC ticks; convert to a duration with QPCValueToTimeDelta().
+uint64_t QPCNowRaw() {
+  LARGE_INTEGER perf_counter_now = {};
+  // According to the MSDN documentation for QueryPerformanceCounter(), this
+  // will never fail on systems that run XP or later.
+  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
+  ::QueryPerformanceCounter(&perf_counter_now);
+  return perf_counter_now.QuadPart;
+}
+
+// Converts |in| to a WORD, returning true on success. If |in| does not fit in
+// a WORD, returns false and writes the saturated value (WORD max) to |out|.
+bool SafeConvertToWord(int in, WORD* out) {
+  CheckedNumeric<WORD> result = in;
+  *out = result.ValueOrDefault(std::numeric_limits<WORD>::max());
+  return result.IsValid();
+}
+
+} // namespace
+
+// Time -----------------------------------------------------------------------
+
+namespace subtle {
+Time TimeNowIgnoringOverride() {
+  // NOTE(review): g_initial_time/g_initial_ticks are read and refreshed here
+  // without holding a lock -- presumably tolerated as a benign race (worst
+  // case appears to be a redundant resync); confirm against upstream intent.
+  if (g_initial_time == 0)
+    InitializeClock();
+
+  // We implement time using the high-resolution timers so that we can get
+  // timeouts which are smaller than 10-15ms. If we just used
+  // CurrentWallclockMicroseconds(), we'd have the less-granular timer.
+  //
+  // To make this work, we initialize the clock (g_initial_time) and the
+  // counter (initial_ctr). To compute the initial time, we can check
+  // the number of ticks that have elapsed, and compute the delta.
+  //
+  // To avoid any drift, we periodically resync the counters to the system
+  // clock.
+  while (true) {
+    TimeTicks ticks = TimeTicksNowIgnoringOverride();
+
+    // Calculate the time elapsed since we started our timer
+    TimeDelta elapsed = ticks - g_initial_ticks;
+
+    // Check if enough time has elapsed that we need to resync the clock.
+    if (elapsed > kMaxTimeToAvoidDrift) {
+      InitializeClock();
+      continue;
+    }
+
+    return Time() + elapsed + TimeDelta::FromMicroseconds(g_initial_time);
+  }
+}
+
+Time TimeNowFromSystemTimeIgnoringOverride() {
+  // Force resync against the system clock instead of reusing the cached
+  // g_initial_time/g_initial_ticks pair.
+  InitializeClock();
+  return Time() + TimeDelta::FromMicroseconds(g_initial_time);
+}
+} // namespace subtle
+
+// static
+// Converts a FILETIME to a Time. The all-zero FILETIME maps to the null
+// Time() and the all-ones FILETIME maps to Time::Max(), mirroring
+// Time::ToFileTime().
+Time Time::FromFileTime(FILETIME ft) {
+  if (bit_cast<int64_t, FILETIME>(ft) == 0)
+    return Time();
+  if (ft.dwHighDateTime == std::numeric_limits<DWORD>::max() &&
+      ft.dwLowDateTime == std::numeric_limits<DWORD>::max())
+    return Max();
+  return Time(FileTimeToMicroseconds(ft));
+}
+
+// Converts this Time to a UTC FILETIME. The null Time() maps to the all-zero
+// FILETIME and Max() maps to the all-ones FILETIME, mirroring FromFileTime().
+FILETIME Time::ToFileTime() const {
+  if (is_null())
+    return bit_cast<FILETIME, int64_t>(0);
+  if (is_max()) {
+    FILETIME result;
+    result.dwHighDateTime = std::numeric_limits<DWORD>::max();
+    result.dwLowDateTime = std::numeric_limits<DWORD>::max();
+    return result;
+  }
+  FILETIME utc_ft;
+  MicrosecondsToFileTime(us_, &utc_ft);
+  return utc_ft;
+}
+
+// Samples the kSlowDCTimerInterruptsWin feature to select the battery-power
+// timer interrupt interval (8 ms when enabled, 4 ms otherwise), then applies
+// the new setting.
+void Time::ReadMinTimerIntervalLowResMs() {
+  AutoLock lock(*GetHighResLock());
+  // Read the setting for what interval to use on battery power.
+  g_battery_power_interval_ms =
+      base::FeatureList::IsEnabled(base::kSlowDCTimerInterruptsWin) ? 8 : 4;
+  UpdateTimerIntervalLocked();
+}
+
+// static
+// Enable raising of the system-global timer interrupt frequency to 1 kHz (when
+// enable is true, which happens when on AC power) or some lower frequency when
+// on battery power (when enable is false). If the g_high_res_timer_enabled
+// setting hasn't actually changed or if there are no outstanding requests
+// (if g_high_res_timer_count is zero) then do nothing.
+// TL;DR - call this when going from AC to DC power or vice-versa.
+void Time::EnableHighResolutionTimer(bool enable) {
+  AutoLock lock(*GetHighResLock());
+  g_high_res_timer_enabled = enable;
+  UpdateTimerIntervalLocked();
+}
+
+// static
+// Request that the system-global Windows timer interrupt frequency be raised.
+// How high the frequency is raised depends on the system's power state and
+// possibly other options.
+// TL;DR - call this at the beginning and end of a time period where you want
+// higher frequency timer interrupts. Each call with activating=true must be
+// paired with a subsequent activating=false call.
+// Always returns true.
+bool Time::ActivateHighResolutionTimer(bool activating) {
+  // We only do work on the transition from zero to one or one to zero so we
+  // can easily undo the effect (if necessary) when EnableHighResolutionTimer is
+  // called.
+  const uint32_t max = std::numeric_limits<uint32_t>::max();
+
+  AutoLock lock(*GetHighResLock());
+  if (activating) {
+    DCHECK_NE(g_high_res_timer_count, max);
+    ++g_high_res_timer_count;
+  } else {
+    DCHECK_NE(g_high_res_timer_count, 0u);
+    --g_high_res_timer_count;
+  }
+  UpdateTimerIntervalLocked();
+  return true;
+}
+
+// static
+// See if the timer interrupt interval has been set to the lowest value.
+bool Time::IsHighResolutionTimerInUse() {
+  AutoLock lock(*GetHighResLock());
+  // True only when the 1 ms (high-res, AC-power) interval is currently active.
+  return g_last_interval_requested_ms == MinTimerIntervalHighResMs();
+}
+
+// static
+// Restarts the usage accounting reported by GetHighResolutionTimerUsage().
+void Time::ResetHighResolutionTimerUsage() {
+  AutoLock lock(*GetHighResLock());
+  g_high_res_timer_usage = TimeDelta();
+  g_high_res_timer_usage_start = subtle::TimeTicksNowIgnoringOverride();
+  if (g_high_res_timer_count > 0)
+    g_high_res_timer_last_activation = g_high_res_timer_usage_start;
+}
+
+// static
+// Returns the percentage (0-100) of the time since the last
+// ResetHighResolutionTimerUsage() during which the high resolution timer was
+// active.
+double Time::GetHighResolutionTimerUsage() {
+  AutoLock lock(*GetHighResLock());
+  TimeTicks now = subtle::TimeTicksNowIgnoringOverride();
+  TimeDelta elapsed_time = now - g_high_res_timer_usage_start;
+  if (elapsed_time.is_zero()) {
+    // This is unexpected but possible if TimeTicks resolution is low and
+    // GetHighResolutionTimerUsage() is called promptly after
+    // ResetHighResolutionTimerUsage().
+    return 0.0;
+  }
+  TimeDelta used_time = g_high_res_timer_usage;
+  if (g_high_res_timer_count > 0) {
+    // If currently activated add the remainder of time since the last
+    // activation.
+    used_time += now - g_high_res_timer_last_activation;
+  }
+  return used_time.InMillisecondsF() / elapsed_time.InMillisecondsF() * 100;
+}
+
+// static
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
+  // Create the system struct representing our exploded time. It will either be
+  // in local time or UTC. If casting from int to WORD results in overflow,
+  // fail and return Time(0).
+  SYSTEMTIME st;
+  if (!SafeConvertToWord(exploded.year, &st.wYear) ||
+      !SafeConvertToWord(exploded.month, &st.wMonth) ||
+      !SafeConvertToWord(exploded.day_of_week, &st.wDayOfWeek) ||
+      !SafeConvertToWord(exploded.day_of_month, &st.wDay) ||
+      !SafeConvertToWord(exploded.hour, &st.wHour) ||
+      !SafeConvertToWord(exploded.minute, &st.wMinute) ||
+      !SafeConvertToWord(exploded.second, &st.wSecond) ||
+      !SafeConvertToWord(exploded.millisecond, &st.wMilliseconds)) {
+    *time = Time(0);
+    return false;
+  }
+
+  FILETIME ft;
+  bool success = true;
+  // Ensure that it's in UTC.
+  if (is_local) {
+    SYSTEMTIME utc_st;
+    success = TzSpecificLocalTimeToSystemTime(nullptr, &st, &utc_st) &&
+              SystemTimeToFileTime(&utc_st, &ft);
+  } else {
+    success = !!SystemTimeToFileTime(&st, &ft);
+  }
+
+  if (!success) {
+    *time = Time(0);
+    return false;
+  }
+
+  *time = Time(FileTimeToMicroseconds(ft));
+  return true;
+}
+
+// Breaks this Time down into a SYSTEMTIME-based Exploded, in UTC or local
+// time. Times before the Windows epoch (us_ < 0) cannot be represented as a
+// FILETIME and explode to all-zero fields.
+void Time::Explode(bool is_local, Exploded* exploded) const {
+  if (us_ < 0LL) {
+    // We are not able to convert it to FILETIME.
+    ZeroMemory(exploded, sizeof(*exploded));
+    return;
+  }
+
+  // FILETIME in UTC.
+  FILETIME utc_ft;
+  MicrosecondsToFileTime(us_, &utc_ft);
+
+  // FILETIME in local time if necessary.
+  bool success = true;
+  // FILETIME in SYSTEMTIME (exploded).
+  SYSTEMTIME st = {0};
+  if (is_local) {
+    SYSTEMTIME utc_st;
+    // We don't use FileTimeToLocalFileTime here, since it uses the current
+    // settings for the time zone and daylight saving time. Therefore, if it is
+    // daylight saving time, it will take daylight saving time into account,
+    // even if the time you are converting is in standard time.
+    success = FileTimeToSystemTime(&utc_ft, &utc_st) &&
+              SystemTimeToTzSpecificLocalTime(nullptr, &utc_st, &st);
+  } else {
+    success = !!FileTimeToSystemTime(&utc_ft, &st);
+  }
+
+  if (!success) {
+    NOTREACHED() << "Unable to convert time, don't know why";
+    ZeroMemory(exploded, sizeof(*exploded));
+    return;
+  }
+
+  exploded->year = st.wYear;
+  exploded->month = st.wMonth;
+  exploded->day_of_week = st.wDayOfWeek;
+  exploded->day_of_month = st.wDay;
+  exploded->hour = st.wHour;
+  exploded->minute = st.wMinute;
+  exploded->second = st.wSecond;
+  exploded->millisecond = st.wMilliseconds;
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+namespace {
+
+// We define a wrapper to adapt between the __stdcall and __cdecl call of the
+// mock function, and to avoid a static constructor. Assigning an import to a
+// function pointer directly would require setup code to fetch from the IAT.
+DWORD timeGetTimeWrapper() {
+  return timeGetTime();
+}
+
+// The tick source used by RolloverProtectedNow(); replaceable for tests via
+// TimeTicks::SetMockTickFunction().
+DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
+
+// A structure holding the most significant bits of "last seen" and a
+// "rollover" counter.
+union LastTimeAndRolloversState {
+ // The state as a single 32-bit opaque value.
+ subtle::Atomic32 as_opaque_32;
+
+ // The state as usable values.
+ struct {
+ // The top 8-bits of the "last" time. This is enough to check for rollovers
+ // and the small bit-size means fewer CompareAndSwap operations to store
+ // changes in state, which in turn makes for fewer retries.
+ uint8_t last_8;
+ // A count of the number of detected rollovers. Using this as bits 47-32
+ // of the upper half of a 64-bit value results in a 48-bit tick counter.
+ // This extends the total rollover period from about 49 days to about 8800
+ // years while still allowing it to be stored with last_8 in a single
+ // 32-bit value.
+ uint16_t rollovers;
+ } as_values;
+};
+subtle::Atomic32 g_last_time_and_rollovers = 0;
+static_assert(
+ sizeof(LastTimeAndRolloversState) <= sizeof(g_last_time_and_rollovers),
+ "LastTimeAndRolloversState does not fit in a single atomic word");
+
+// We use timeGetTime() to implement TimeTicks::Now(). This can be problematic
+// because it returns the number of milliseconds since Windows has started,
+// which will roll over the 32-bit value every ~49 days. We try to track
+// rollover ourselves, which works if TimeTicks::Now() is called at least every
+// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
+TimeTicks RolloverProtectedNow() {
+  LastTimeAndRolloversState state;
+  DWORD now; // DWORD is always unsigned 32 bits.
+
+  while (true) {
+    // Fetch the "now" and "last" tick values, updating "last" with "now" and
+    // incrementing the "rollovers" counter if the tick-value has wrapped back
+    // around. Atomic operations ensure that both "last" and "rollovers" are
+    // always updated together.
+    int32_t original = subtle::Acquire_Load(&g_last_time_and_rollovers);
+    state.as_opaque_32 = original;
+    now = g_tick_function();
+    uint8_t now_8 = static_cast<uint8_t>(now >> 24);
+    if (now_8 < state.as_values.last_8)
+      ++state.as_values.rollovers;
+    state.as_values.last_8 = now_8;
+
+    // If the state hasn't changed, exit the loop.
+    if (state.as_opaque_32 == original)
+      break;
+
+    // Save the changed state. If the existing value is unchanged from the
+    // original, exit the loop.
+    int32_t check = subtle::Release_CompareAndSwap(
+        &g_last_time_and_rollovers, original, state.as_opaque_32);
+    if (check == original)
+      break;
+
+    // Another thread has done something in between so retry from the top.
+  }
+
+  // Combine the 32-bit tick value with the rollover count to form a 48-bit
+  // millisecond counter.
+  return TimeTicks() +
+         TimeDelta::FromMilliseconds(
+             now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
+}
+
+// Discussion of tick counter options on Windows:
+//
+// (1) CPU cycle counter. (Retrieved via RDTSC)
+// The CPU counter provides the highest resolution time stamp and is the least
+// expensive to retrieve. However, on older CPUs, two issues can affect its
+// reliability: First it is maintained per processor and not synchronized
+// between processors. Also, the counters will change frequency due to thermal
+// and power changes, and stop in some states.
+//
+// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
+// resolution (<1 microsecond) time stamp. On most hardware running today, it
+// auto-detects and uses the constant-rate RDTSC counter to provide extremely
+// efficient and reliable time stamps.
+//
+// On older CPUs where RDTSC is unreliable, it falls back to using more
+// expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
+// PM timer, and can involve system calls; and all this is up to the HAL (with
+// some help from ACPI). According to
+// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
+// worst case, it gets the counter from the rollover interrupt on the
+// programmable interrupt timer. In best cases, the HAL may conclude that the
+// RDTSC counter runs at a constant frequency, then it uses that instead. On
+// multiprocessor machines, it will try to verify the values returned from
+// RDTSC on each processor are consistent with each other, and apply a handful
+// of workarounds for known buggy hardware. In other words, QPC is supposed to
+// give consistent results on a multiprocessor computer, but for older CPUs it
+// can be unreliable due bugs in BIOS or HAL.
+//
+// (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
+// milliseconds) time stamp but is comparatively less expensive to retrieve and
+// more reliable. Time::EnableHighResolutionTimer() and
+// Time::ActivateHighResolutionTimer() can be called to alter the resolution of
+// this timer; and also other Windows applications can alter it, affecting this
+// one.
+
+TimeTicks InitialNowFunction();
+
+// See "threading notes" in InitializeNowFunctionPointer() for details on how
+// concurrent reads/writes to these globals has been made safe.
+TimeTicksNowFunction g_time_ticks_now_ignoring_override_function =
+ &InitialNowFunction;
+int64_t g_qpc_ticks_per_second = 0;
+
+// As of January 2015, use of <atomic> is forbidden in Chromium code. This is
+// what std::atomic_thread_fence does on Windows on all Intel architectures when
+// the memory_order argument is anything but std::memory_order_seq_cst:
+#define ATOMIC_THREAD_FENCE(memory_order) _ReadWriteBarrier();
+
+// Converts a raw QPC reading into a TimeDelta using the cached
+// |g_qpc_ticks_per_second| frequency.
+TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
+  // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
+  // InitializeNowFunctionPointer(), has happened by this point.
+  ATOMIC_THREAD_FENCE(memory_order_acquire);
+
+  DCHECK_GT(g_qpc_ticks_per_second, 0);
+
+  // If the QPC Value is below the overflow threshold, we proceed with
+  // simple multiply and divide.
+  if (qpc_value < Time::kQPCOverflowThreshold) {
+    return TimeDelta::FromMicroseconds(
+        qpc_value * Time::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
+  }
+  // Otherwise, calculate microseconds in a round about manner to avoid
+  // overflow and precision issues.
+  int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
+  int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
+  return TimeDelta::FromMicroseconds(
+      (whole_seconds * Time::kMicrosecondsPerSecond) +
+      ((leftover_ticks * Time::kMicrosecondsPerSecond) /
+       g_qpc_ticks_per_second));
+}
+
+// High-resolution TimeTicks::Now() implementation backed by
+// QueryPerformanceCounter.
+TimeTicks QPCNow() {
+  return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw());
+}
+
+void InitializeNowFunctionPointer() {
+  LARGE_INTEGER ticks_per_sec = {};
+  if (!QueryPerformanceFrequency(&ticks_per_sec))
+    ticks_per_sec.QuadPart = 0;
+
+  // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
+  // the low-resolution clock.
+  //
+  // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
+  // will still use the low-resolution clock. A CPU lacking a non-stop time
+  // counter will cause Windows to provide an alternate QPC implementation that
+  // works, but is expensive to use.
+  //
+  // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
+  // ~72% of users fall within this category.
+  TimeTicksNowFunction now_function;
+  CPU cpu;
+  if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter()) {
+    now_function = &RolloverProtectedNow;
+  } else {
+    now_function = &QPCNow;
+  }
+
+  // Threading note 1: In an unlikely race condition, it's possible for two or
+  // more threads to enter InitializeNowFunctionPointer() in parallel. This is
+  // not a problem since all threads should end up writing out the same values
+  // to the global variables.
+  //
+  // Threading note 2: A release fence is placed here to ensure, from the
+  // perspective of other threads using the function pointers, that the
+  // assignment to |g_qpc_ticks_per_second| happens before the function pointers
+  // are changed.
+  g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
+  ATOMIC_THREAD_FENCE(memory_order_release);
+  // Also set g_time_ticks_now_function to avoid the additional indirection via
+  // TimeTicksNowIgnoringOverride() for future calls to TimeTicks::Now(). But
+  // g_time_ticks_now_function may already have been overridden.
+  if (internal::g_time_ticks_now_function ==
+      &subtle::TimeTicksNowIgnoringOverride) {
+    internal::g_time_ticks_now_function = now_function;
+  }
+  g_time_ticks_now_ignoring_override_function = now_function;
+}
+
+// Bootstrap implementation: the first TimeTicks read through the function
+// pointer selects the real Now() implementation, then delegates to it.
+TimeTicks InitialNowFunction() {
+  InitializeNowFunctionPointer();
+  return g_time_ticks_now_ignoring_override_function();
+}
+
+} // namespace
+
+// static
+// Installs a mock tick source for tests and resets the rollover-detection
+// state; returns the previously installed tick function so callers can
+// restore it.
+TimeTicks::TickFunctionType TimeTicks::SetMockTickFunction(
+    TickFunctionType ticker) {
+  TickFunctionType old = g_tick_function;
+  g_tick_function = ticker;
+  subtle::NoBarrier_Store(&g_last_time_and_rollovers, 0);
+  return old;
+}
+
+namespace subtle {
+// Platform-default TimeTicks::Now(), bypassing any ScopedTimeClockOverrides.
+TimeTicks TimeTicksNowIgnoringOverride() {
+  return g_time_ticks_now_ignoring_override_function();
+}
+} // namespace subtle
+
+// static
+bool TimeTicks::IsHighResolution() {
+  // Resolve the Now() implementation first if nothing has triggered it yet.
+  if (g_time_ticks_now_ignoring_override_function == &InitialNowFunction)
+    InitializeNowFunctionPointer();
+  return g_time_ticks_now_ignoring_override_function == &QPCNow;
+}
+
+// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+ // According to Windows documentation [1] QPC is consistent post-Windows
+ // Vista. So if we are using QPC then we are consistent which is the same as
+ // being high resolution.
+ //
+ // [1] https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx
+ //
+ // "In general, the performance counter results are consistent across all
+ // processors in multi-core and multi-processor systems, even when measured on
+ // different threads or processes. Here are some exceptions to this rule:
+ // - Pre-Windows Vista operating systems that run on certain processors might
+ // violate this consistency because of one of these reasons:
+ // 1. The hardware processors have a non-invariant TSC and the BIOS
+ // doesn't indicate this condition correctly.
+ // 2. The TSC synchronization algorithm that was used wasn't suitable for
+ // systems with large numbers of processors."
+ return IsHighResolution();
+}
+
+// static
+// Reports which underlying clock backs TimeTicks::Now() on this system.
+TimeTicks::Clock TimeTicks::GetClock() {
+  return IsHighResolution() ?
+      Clock::WIN_QPC : Clock::WIN_ROLLOVER_PROTECTED_TIME_GET_TIME;
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+namespace subtle {
+// Platform-default ThreadTicks::Now(), bypassing any overrides: the CPU time
+// consumed by the current thread.
+ThreadTicks ThreadTicksNowIgnoringOverride() {
+  return ThreadTicks::GetForThread(PlatformThread::CurrentHandle());
+}
+} // namespace subtle
+
+// static
+ThreadTicks ThreadTicks::GetForThread(
+    const PlatformThreadHandle& thread_handle) {
+  DCHECK(IsSupported());
+
+#if defined(ARCH_CPU_ARM64)
+  // QueryThreadCycleTime versus TSCTicksPerSecond doesn't have much relation to
+  // actual elapsed time on Windows on Arm, because QueryThreadCycleTime is
+  // backed by the actual number of CPU cycles executed, rather than a
+  // constant-rate timer like Intel. To work around this, use GetThreadTimes
+  // (which isn't as accurate but is meaningful as a measure of elapsed
+  // per-thread time).
+  // NOTE(review): the BOOL result of ::GetThreadTimes is ignored; on failure
+  // |user_time| would be left uninitialized -- confirm failure is impossible
+  // for a valid thread handle.
+  FILETIME creation_time, exit_time, kernel_time, user_time;
+  ::GetThreadTimes(thread_handle.platform_handle(), &creation_time, &exit_time,
+                   &kernel_time, &user_time);
+
+  int64_t us = FileTimeToMicroseconds(user_time);
+  return ThreadTicks(us);
+#else
+  // Get the number of TSC ticks used by the current thread.
+  ULONG64 thread_cycle_time = 0;
+  ::QueryThreadCycleTime(thread_handle.platform_handle(), &thread_cycle_time);
+
+  // Get the frequency of the TSC.
+  double tsc_ticks_per_second = TSCTicksPerSecond();
+  if (tsc_ticks_per_second == 0)
+    return ThreadTicks();
+
+  // Return the CPU time of the current thread.
+  double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
+  return ThreadTicks(
+      static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
+#endif
+}
+
+// static
+bool ThreadTicks::IsSupportedWin() {
+  // Requires an invariant (non-stop) TSC; cached after the first call.
+  static bool is_supported = CPU().has_non_stop_time_stamp_counter();
+  return is_supported;
+}
+
+// static
+// Sleep-polls until TSCTicksPerSecond() has accumulated a long enough
+// measurement interval to return a real value (it returns 0 until then).
+// No-op on ARM64, where TSCTicksPerSecond() is not compiled in.
+void ThreadTicks::WaitUntilInitializedWin() {
+#if !defined(ARCH_CPU_ARM64)
+  while (TSCTicksPerSecond() == 0)
+    ::Sleep(10);
+#endif
+}
+
+#if !defined(ARCH_CPU_ARM64)
+double ThreadTicks::TSCTicksPerSecond() {
+  DCHECK(IsSupported());
+  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
+  // frequency, because there is no guarantee that the TSC frequency is equal to
+  // the performance counter frequency.
+  // The TSC frequency is cached in a static variable because it takes some time
+  // to compute it.
+  static double tsc_ticks_per_second = 0;
+  if (tsc_ticks_per_second != 0)
+    return tsc_ticks_per_second;
+
+  // Increase the thread priority to reduce the chances of having a context
+  // switch during a reading of the TSC and the performance counter.
+  int previous_priority = ::GetThreadPriority(::GetCurrentThread());
+  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+
+  // The first time that this function is called, make an initial reading of the
+  // TSC and the performance counter.
+
+  static const uint64_t tsc_initial = __rdtsc();
+  static const uint64_t perf_counter_initial = QPCNowRaw();
+
+  // Make another reading of the TSC and the performance counter every time
+  // that this function is called.
+  uint64_t tsc_now = __rdtsc();
+  uint64_t perf_counter_now = QPCNowRaw();
+
+  // Reset the thread priority.
+  ::SetThreadPriority(::GetCurrentThread(), previous_priority);
+
+  // Make sure that at least 50 ms elapsed between the 2 readings. The first
+  // time that this function is called, we don't expect this to be the case.
+  // Note: The longer the elapsed time between the 2 readings is, the more
+  //   accurate the computed TSC frequency will be. The 50 ms value was
+  //   chosen because local benchmarks show that it allows us to get a
+  //   stddev of less than 1 tick/us between multiple runs.
+  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
+  //   this will never fail on systems that run XP or later.
+  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
+  LARGE_INTEGER perf_counter_frequency = {};
+  ::QueryPerformanceFrequency(&perf_counter_frequency);
+  DCHECK_GE(perf_counter_now, perf_counter_initial);
+  uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
+  double elapsed_time_seconds =
+      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);
+
+  static constexpr double kMinimumEvaluationPeriodSeconds = 0.05;
+  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
+    return 0;
+
+  // Compute the frequency of the TSC.
+  DCHECK_GE(tsc_now, tsc_initial);
+  uint64_t tsc_ticks = tsc_now - tsc_initial;
+  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;
+
+  return tsc_ticks_per_second;
+}
+#endif // !defined(ARCH_CPU_ARM64)
+
+// static
+// Converts a raw QPC reading into an absolute TimeTicks value.
+TimeTicks TimeTicks::FromQPCValue(LONGLONG qpc_value) {
+  return TimeTicks() + QPCValueToTimeDelta(qpc_value);
+}
+
+// TimeDelta ------------------------------------------------------------------
+
+// static
+// Converts a raw QPC reading into a duration.
+TimeDelta TimeDelta::FromQPCValue(LONGLONG qpc_value) {
+  return QPCValueToTimeDelta(qpc_value);
+}
+
+// static
+// Interprets a FILETIME as a duration (100 ns units) rather than a point in
+// time.
+TimeDelta TimeDelta::FromFileTime(FILETIME ft) {
+  return TimeDelta::FromMicroseconds(FileTimeToMicroseconds(ft));
+}
+
+// static
+TimeDelta TimeDelta::FromWinrtDateTime(ABI::Windows::Foundation::DateTime dt) {
+  // UniversalTime is 100 ns intervals since January 1, 1601 (UTC)
+  return TimeDelta::FromMicroseconds(dt.UniversalTime / 10);
+}
+
+// Inverse of FromWinrtDateTime(): expresses this delta in WinRT DateTime
+// units (100 ns intervals).
+ABI::Windows::Foundation::DateTime TimeDelta::ToWinrtDateTime() const {
+  ABI::Windows::Foundation::DateTime date_time;
+  date_time.UniversalTime = InMicroseconds() * 10;
+  return date_time;
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/time/time_win_features.cc b/security/sandbox/chromium/base/time/time_win_features.cc
new file mode 100644
index 0000000000..edf3024e70
--- /dev/null
+++ b/security/sandbox/chromium/base/time/time_win_features.cc
@@ -0,0 +1,14 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time_win_features.h"
+
+#include "base/feature_list.h"
+
+namespace base {
+
+// When enabled, slows the DC (battery) timer interrupt interval from 4 ms to
+// 8 ms; consumed by Time::ReadMinTimerIntervalLowResMs() in time_win.cc.
+const Feature kSlowDCTimerInterruptsWin{"SlowDCTimerInterruptsWin",
+                                        FEATURE_DISABLED_BY_DEFAULT};
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/time/time_win_features.h b/security/sandbox/chromium/base/time/time_win_features.h
new file mode 100644
index 0000000000..b586ec2419
--- /dev/null
+++ b/security/sandbox/chromium/base/time/time_win_features.h
@@ -0,0 +1,20 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_TIME_WIN_FEATURES_H_
+#define BASE_TIME_TIME_WIN_FEATURES_H_
+
+#include "base/base_export.h"
+
+namespace base {
+
+struct Feature;
+
+// Slow the maximum interrupt timer on battery power to 8 ms, instead of the
+// default 4 ms.
+extern const BASE_EXPORT Feature kSlowDCTimerInterruptsWin;
+
+} // namespace base
+
+#endif // BASE_TIME_TIME_WIN_FEATURES_H_
diff --git a/security/sandbox/chromium/base/token.cc b/security/sandbox/chromium/base/token.cc
new file mode 100644
index 0000000000..e7ad896713
--- /dev/null
+++ b/security/sandbox/chromium/base/token.cc
@@ -0,0 +1,28 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/token.h"
+
+#include <inttypes.h>
+
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+// static
+Token Token::CreateRandom() {
+ Token token;
+
+ // Use base::RandBytes instead of crypto::RandBytes, because crypto calls the
+ // base version directly, and to prevent the dependency from base/ to crypto/.
+ base::RandBytes(&token, sizeof(token));
+ return token;
+}
+
+std::string Token::ToString() const {
+ return base::StringPrintf("%016" PRIX64 "%016" PRIX64, high_, low_);
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/token.h b/security/sandbox/chromium/base/token.h
new file mode 100644
index 0000000000..e2843a6d14
--- /dev/null
+++ b/security/sandbox/chromium/base/token.h
@@ -0,0 +1,72 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TOKEN_H_
+#define BASE_TOKEN_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <tuple>
+
+#include "base/base_export.h"
+#include "base/hash/hash.h"
+
+namespace base {
+
+// A Token is a randomly chosen 128-bit integer. This class supports generation
+// from a cryptographically strong random source, or constexpr construction over
+// fixed values (e.g. to store a pre-generated constant value). Tokens are
+// similar in spirit and purpose to UUIDs, without many of the constraints and
+// expectations (such as byte layout and string representation) classically
+// associated with UUIDs.
+class BASE_EXPORT Token {
+ public:
+ // Constructs a zero Token.
+ constexpr Token() : high_(0), low_(0) {}
+
+ // Constructs a Token with |high| and |low| as its contents.
+ constexpr Token(uint64_t high, uint64_t low) : high_(high), low_(low) {}
+
+ // Constructs a new Token with random |high| and |low| values taken from a
+ // cryptographically strong random source.
+ static Token CreateRandom();
+
+ // The high and low 64 bits of this Token.
+ uint64_t high() const { return high_; }
+ uint64_t low() const { return low_; }
+
+ bool is_zero() const { return high_ == 0 && low_ == 0; }
+
+ bool operator==(const Token& other) const {
+ return high_ == other.high_ && low_ == other.low_;
+ }
+
+ bool operator!=(const Token& other) const { return !(*this == other); }
+
+ bool operator<(const Token& other) const {
+ return std::tie(high_, low_) < std::tie(other.high_, other.low_);
+ }
+
+ // Generates a string representation of this Token useful for e.g. logging.
+ std::string ToString() const;
+
+ private:
+ // Note: Two uint64_t are used instead of uint8_t[16] in order to have a
+ // simpler implementation, particularly for |ToString()|, |is_zero()|, and
+ // constexpr value construction.
+ uint64_t high_;
+ uint64_t low_;
+};
+
+// For use in std::unordered_map.
+struct TokenHash {
+ size_t operator()(const base::Token& token) const {
+ return base::HashInts64(token.high(), token.low());
+ }
+};
+
+} // namespace base
+
+#endif // BASE_TOKEN_H_
diff --git a/security/sandbox/chromium/base/tuple.h b/security/sandbox/chromium/base/tuple.h
new file mode 100644
index 0000000000..58681d515a
--- /dev/null
+++ b/security/sandbox/chromium/base/tuple.h
@@ -0,0 +1,112 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Use std::tuple as tuple type. This file contains helper functions for
+// working with std::tuples.
+// The functions DispatchToMethod and DispatchToFunction take a function pointer
+// or instance and method pointer, and unpack a tuple into arguments to the
+// call.
+//
+// Example usage:
+// // These two methods of creating a Tuple are identical.
+// std::tuple<int, const char*> tuple_a(1, "wee");
+// std::tuple<int, const char*> tuple_b = std::make_tuple(1, "wee");
+//
+// void SomeFunc(int a, const char* b) { }
+// DispatchToFunction(&SomeFunc, tuple_a); // SomeFunc(1, "wee")
+// DispatchToFunction(
+// &SomeFunc, std::make_tuple(10, "foo")); // SomeFunc(10, "foo")
+//
+// struct { void SomeMeth(int a, int b, int c) { } } foo;
+// DispatchToMethod(&foo, &Foo::SomeMeth, std::make_tuple(1, 2, 3));
+// // foo->SomeMeth(1, 2, 3);
+
+#ifndef BASE_TUPLE_H_
+#define BASE_TUPLE_H_
+
+#include <stddef.h>
+#include <tuple>
+#include <utility>
+
+#include "build/build_config.h"
+
+namespace base {
+
+// Dispatchers ----------------------------------------------------------------
+//
+// Helper functions that call the given method on an object, with the unpacked
+// tuple arguments. Notice that they all have the same number of arguments,
+// so you need only write:
+// DispatchToMethod(object, &Object::method, args);
+// This is very useful for templated dispatchers, since they don't need to know
+// what type |args| is.
+
+// Non-Static Dispatchers with no out params.
+
+template <typename ObjT, typename Method, typename Tuple, size_t... Ns>
+inline void DispatchToMethodImpl(const ObjT& obj,
+ Method method,
+ Tuple&& args,
+ std::index_sequence<Ns...>) {
+ (obj->*method)(std::get<Ns>(std::forward<Tuple>(args))...);
+}
+
+template <typename ObjT, typename Method, typename Tuple>
+inline void DispatchToMethod(const ObjT& obj,
+ Method method,
+ Tuple&& args) {
+ constexpr size_t size = std::tuple_size<std::decay_t<Tuple>>::value;
+ DispatchToMethodImpl(obj, method, std::forward<Tuple>(args),
+ std::make_index_sequence<size>());
+}
+
+// Static Dispatchers with no out params.
+
+template <typename Function, typename Tuple, size_t... Ns>
+inline void DispatchToFunctionImpl(Function function,
+ Tuple&& args,
+ std::index_sequence<Ns...>) {
+ (*function)(std::get<Ns>(std::forward<Tuple>(args))...);
+}
+
+template <typename Function, typename Tuple>
+inline void DispatchToFunction(Function function, Tuple&& args) {
+ constexpr size_t size = std::tuple_size<std::decay_t<Tuple>>::value;
+ DispatchToFunctionImpl(function, std::forward<Tuple>(args),
+ std::make_index_sequence<size>());
+}
+
+// Dispatchers with out parameters.
+
+template <typename ObjT,
+ typename Method,
+ typename InTuple,
+ typename OutTuple,
+ size_t... InNs,
+ size_t... OutNs>
+inline void DispatchToMethodImpl(const ObjT& obj,
+ Method method,
+ InTuple&& in,
+ OutTuple* out,
+ std::index_sequence<InNs...>,
+ std::index_sequence<OutNs...>) {
+ (obj->*method)(std::get<InNs>(std::forward<InTuple>(in))...,
+ &std::get<OutNs>(*out)...);
+}
+
+template <typename ObjT, typename Method, typename InTuple, typename OutTuple>
+inline void DispatchToMethod(const ObjT& obj,
+ Method method,
+ InTuple&& in,
+ OutTuple* out) {
+ constexpr size_t in_size = std::tuple_size<std::decay_t<InTuple>>::value;
+ constexpr size_t out_size = std::tuple_size<OutTuple>::value;
+ DispatchToMethodImpl(obj, method, std::forward<InTuple>(in), out,
+ std::make_index_sequence<in_size>(),
+ std::make_index_sequence<out_size>());
+}
+
+} // namespace base
+
+#endif // BASE_TUPLE_H_
diff --git a/security/sandbox/chromium/base/unguessable_token.cc b/security/sandbox/chromium/base/unguessable_token.cc
new file mode 100644
index 0000000000..973b4167bd
--- /dev/null
+++ b/security/sandbox/chromium/base/unguessable_token.cc
@@ -0,0 +1,39 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/unguessable_token.h"
+
+#include "base/format_macros.h"
+#include "base/no_destructor.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+UnguessableToken::UnguessableToken(const base::Token& token) : token_(token) {}
+
+// static
+UnguessableToken UnguessableToken::Create() {
+ return UnguessableToken(Token::CreateRandom());
+}
+
+// static
+const UnguessableToken& UnguessableToken::Null() {
+ static const NoDestructor<UnguessableToken> null_token;
+ return *null_token;
+}
+
+// static
+UnguessableToken UnguessableToken::Deserialize(uint64_t high, uint64_t low) {
+ // Receiving a zeroed out UnguessableToken from another process means that it
+ // was never initialized via Create(). Treat this case as a security issue.
+ DCHECK(!(high == 0 && low == 0));
+ return UnguessableToken(Token{high, low});
+}
+
+std::ostream& operator<<(std::ostream& out, const UnguessableToken& token) {
+ return out << "(" << token.ToString() << ")";
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/unguessable_token.h b/security/sandbox/chromium/base/unguessable_token.h
new file mode 100644
index 0000000000..895dbc46c4
--- /dev/null
+++ b/security/sandbox/chromium/base/unguessable_token.h
@@ -0,0 +1,120 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_UNGUESSABLE_TOKEN_H_
+#define BASE_UNGUESSABLE_TOKEN_H_
+
+#include <stdint.h>
+#include <string.h>
+#include <iosfwd>
+#include <tuple>
+
+#include "base/base_export.h"
+#include "base/hash/hash.h"
+#include "base/logging.h"
+#include "base/token.h"
+
+namespace base {
+
+struct UnguessableTokenHash;
+
+// UnguessableToken is, like Token, a randomly chosen 128-bit value. Unlike
+// Token, a new UnguessableToken is always generated at runtime from a
+// cryptographically strong random source (or copied or serialized and
+// deserialized from another such UnguessableToken). It can be used as part of a
+// larger aggregate type, or as an ID in and of itself.
+//
+// An UnguessableToken is a strong *bearer token*. Bearer tokens are like HTTP
+// cookies: if a caller has the token, the callee thereby considers the caller
+// authorized to request the operation the callee performs.
+//
+// UnguessableToken can be used when the resource associated with the ID needs
+// to be protected against manipulation by other untrusted agents in the system,
+// and there is no other convenient way to verify the authority of the agent to
+// do so (because the resource is part of a table shared across processes, for
+// instance). In such a scheme, knowledge of the token value in and of itself is
+// sufficient proof of authority to carry out an operation on the associated
+// resource.
+//
+// Use Create() for creating new UnguessableTokens.
+//
+// NOTE: It is illegal to send empty UnguessableTokens across processes, and
+// sending/receiving empty tokens should be treated as a security issue. If
+// there is a valid scenario for sending "no token" across processes, use
+// base::Optional instead of an empty token.
+
+class BASE_EXPORT UnguessableToken {
+ public:
+ // Create a unique UnguessableToken.
+ static UnguessableToken Create();
+
+ // Returns a reference to a global null UnguessableToken. This should only be
+ // used for functions that need to return a reference to an UnguessableToken,
+ // and should not be used as a general-purpose substitute for invoking the
+ // default constructor.
+ static const UnguessableToken& Null();
+
+ // Return an UnguessableToken built from the high/low bytes provided.
+ // It should only be used in deserialization scenarios.
+ //
+ // NOTE: If the deserialized token is empty, it means that it was never
+ // initialized via Create(). This is a security issue, and should be handled.
+ static UnguessableToken Deserialize(uint64_t high, uint64_t low);
+
+ // Creates an empty UnguessableToken.
+ // Assign to it with Create() before using it.
+ constexpr UnguessableToken() = default;
+
+ // NOTE: Serializing an empty UnguessableToken is an illegal operation.
+ uint64_t GetHighForSerialization() const {
+ DCHECK(!is_empty());
+ return token_.high();
+ }
+
+ // NOTE: Serializing an empty UnguessableToken is an illegal operation.
+ uint64_t GetLowForSerialization() const {
+ DCHECK(!is_empty());
+ return token_.low();
+ }
+
+ bool is_empty() const { return token_.is_zero(); }
+
+ // Hex representation of the unguessable token.
+ std::string ToString() const { return token_.ToString(); }
+
+ explicit operator bool() const { return !is_empty(); }
+
+ bool operator<(const UnguessableToken& other) const {
+ return token_ < other.token_;
+ }
+
+ bool operator==(const UnguessableToken& other) const {
+ return token_ == other.token_;
+ }
+
+ bool operator!=(const UnguessableToken& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ friend struct UnguessableTokenHash;
+ explicit UnguessableToken(const Token& token);
+
+ base::Token token_;
+};
+
+BASE_EXPORT std::ostream& operator<<(std::ostream& out,
+ const UnguessableToken& token);
+
+// For use in std::unordered_map.
+struct UnguessableTokenHash {
+ size_t operator()(const base::UnguessableToken& token) const {
+ DCHECK(token);
+ return TokenHash()(token.token_);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_UNGUESSABLE_TOKEN_H_
diff --git a/security/sandbox/chromium/base/version.cc b/security/sandbox/chromium/base/version.cc
new file mode 100644
index 0000000000..2ee8793c03
--- /dev/null
+++ b/security/sandbox/chromium/base/version.cc
@@ -0,0 +1,194 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/version.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+namespace {
+
+// Parses the |numbers| vector representing the different numbers
+// inside the version string and constructs a vector of valid integers. It stops
+// when it reaches an invalid item (including the wildcard character). |parsed|
+// is the resulting integer vector. Function returns true if all numbers were
+// parsed successfully, false otherwise.
+bool ParseVersionNumbers(StringPiece version_str,
+ std::vector<uint32_t>* parsed) {
+ std::vector<StringPiece> numbers =
+ SplitStringPiece(version_str, ".", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+ if (numbers.empty())
+ return false;
+
+ for (auto it = numbers.begin(); it != numbers.end(); ++it) {
+ if (StartsWith(*it, "+", CompareCase::SENSITIVE))
+ return false;
+
+ unsigned int num;
+ if (!StringToUint(*it, &num))
+ return false;
+
+ // This throws out leading zeros for the first item only.
+ if (it == numbers.begin() && NumberToString(num) != *it)
+ return false;
+
+ // StringToUint returns unsigned int but Version fields are uint32_t.
+ static_assert(sizeof (uint32_t) == sizeof (unsigned int),
+ "uint32_t must be same as unsigned int");
+ parsed->push_back(num);
+ }
+ return true;
+}
+
+// Compares version components in |components1| with components in
+// |components2|. Returns -1, 0 or 1 if |components1| is less than, equal to,
+// or greater than |components2|, respectively.
+int CompareVersionComponents(const std::vector<uint32_t>& components1,
+ const std::vector<uint32_t>& components2) {
+ const size_t count = std::min(components1.size(), components2.size());
+ for (size_t i = 0; i < count; ++i) {
+ if (components1[i] > components2[i])
+ return 1;
+ if (components1[i] < components2[i])
+ return -1;
+ }
+ if (components1.size() > components2.size()) {
+ for (size_t i = count; i < components1.size(); ++i) {
+ if (components1[i] > 0)
+ return 1;
+ }
+ } else if (components1.size() < components2.size()) {
+ for (size_t i = count; i < components2.size(); ++i) {
+ if (components2[i] > 0)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+} // namespace
+
+Version::Version() = default;
+
+Version::Version(const Version& other) = default;
+
+Version::~Version() = default;
+
+Version::Version(StringPiece version_str) {
+ std::vector<uint32_t> parsed;
+ if (!ParseVersionNumbers(version_str, &parsed))
+ return;
+
+ components_.swap(parsed);
+}
+
+Version::Version(std::vector<uint32_t> components)
+ : components_(std::move(components)) {}
+
+bool Version::IsValid() const {
+ return (!components_.empty());
+}
+
+// static
+bool Version::IsValidWildcardString(StringPiece wildcard_string) {
+ StringPiece version_string = wildcard_string;
+ if (EndsWith(version_string, ".*", CompareCase::SENSITIVE))
+ version_string = version_string.substr(0, version_string.size() - 2);
+
+ Version version(version_string);
+ return version.IsValid();
+}
+
+int Version::CompareToWildcardString(StringPiece wildcard_string) const {
+ DCHECK(IsValid());
+ DCHECK(Version::IsValidWildcardString(wildcard_string));
+
+ // Default behavior if the string doesn't end with a wildcard.
+ if (!EndsWith(wildcard_string, ".*", CompareCase::SENSITIVE)) {
+ Version version(wildcard_string);
+ DCHECK(version.IsValid());
+ return CompareTo(version);
+ }
+
+ std::vector<uint32_t> parsed;
+ const bool success = ParseVersionNumbers(
+ wildcard_string.substr(0, wildcard_string.length() - 2), &parsed);
+ DCHECK(success);
+ const int comparison = CompareVersionComponents(components_, parsed);
+ // If the version is smaller than the wildcard version's |parsed| vector,
+ // then the wildcard has no effect (e.g. comparing 1.2.3 and 1.3.*) and the
+ // version is still smaller. Same logic for equality (e.g. comparing 1.2.2 to
+ // 1.2.2.* is 0 regardless of the wildcard). Under this logic,
+ // 1.2.0.0.0.0 compared to 1.2.* is 0.
+ if (comparison == -1 || comparison == 0)
+ return comparison;
+
+ // Catch the case where the digits of |parsed| are found in |components_|,
+ // which means that the two are equal since |parsed| has a trailing "*".
+ // (e.g. 1.2.3 vs. 1.2.* will return 0). All other cases return 1 since
+ // components is greater (e.g. 3.2.3 vs 1.*).
+ DCHECK_GT(parsed.size(), 0UL);
+ const size_t min_num_comp = std::min(components_.size(), parsed.size());
+ for (size_t i = 0; i < min_num_comp; ++i) {
+ if (components_[i] != parsed[i])
+ return 1;
+ }
+ return 0;
+}
+
+int Version::CompareTo(const Version& other) const {
+ DCHECK(IsValid());
+ DCHECK(other.IsValid());
+ return CompareVersionComponents(components_, other.components_);
+}
+
+std::string Version::GetString() const {
+ DCHECK(IsValid());
+ std::string version_str;
+ size_t count = components_.size();
+ for (size_t i = 0; i < count - 1; ++i) {
+ version_str.append(NumberToString(components_[i]));
+ version_str.append(".");
+ }
+ version_str.append(NumberToString(components_[count - 1]));
+ return version_str;
+}
+
+bool operator==(const Version& v1, const Version& v2) {
+ return v1.CompareTo(v2) == 0;
+}
+
+bool operator!=(const Version& v1, const Version& v2) {
+ return !(v1 == v2);
+}
+
+bool operator<(const Version& v1, const Version& v2) {
+ return v1.CompareTo(v2) < 0;
+}
+
+bool operator<=(const Version& v1, const Version& v2) {
+ return v1.CompareTo(v2) <= 0;
+}
+
+bool operator>(const Version& v1, const Version& v2) {
+ return v1.CompareTo(v2) > 0;
+}
+
+bool operator>=(const Version& v1, const Version& v2) {
+ return v1.CompareTo(v2) >= 0;
+}
+
+std::ostream& operator<<(std::ostream& stream, const Version& v) {
+ return stream << v.GetString();
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/version.h b/security/sandbox/chromium/base/version.h
new file mode 100644
index 0000000000..9199449de6
--- /dev/null
+++ b/security/sandbox/chromium/base/version.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_VERSION_H_
+#define BASE_VERSION_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Version represents a dotted version number, like "1.2.3.4", supporting
+// parsing and comparison.
+class BASE_EXPORT Version {
+ public:
+ // The only thing you can legally do to a default constructed
+ // Version object is assign to it.
+ Version();
+
+ Version(const Version& other);
+
+ // Initializes from a decimal dotted version number, like "0.1.1".
+ // Each component is limited to a uint32_t. Call IsValid() to learn
+ // the outcome.
+ explicit Version(StringPiece version_str);
+
+ // Initializes from a vector of components, like {1, 2, 3, 4}. Call IsValid()
+ // to learn the outcome.
+ explicit Version(std::vector<uint32_t> components);
+
+ ~Version();
+
+ // Returns true if the object contains a valid version number.
+ bool IsValid() const;
+
+ // Returns true if the version wildcard string is valid. The version wildcard
+ // string may end with ".*" (e.g. 1.2.*, 1.*). Any other arrangement with "*"
+ // is invalid (e.g. 1.*.3 or 1.2.3*). This function defaults to standard
+ // Version behavior (IsValid) if no wildcard is present.
+ static bool IsValidWildcardString(StringPiece wildcard_string);
+
+ // Returns -1, 0, 1 for <, ==, >.
+ int CompareTo(const Version& other) const;
+
+ // Given a valid version object, compare if a |wildcard_string| results in a
+ // newer version. This function will default to CompareTo if the string does
+ // not end in wildcard sequence ".*". IsValidWildcardString(wildcard_string)
+ // true before using this function.
+ int CompareToWildcardString(StringPiece wildcard_string) const;
+
+ // Return the string representation of this version.
+ std::string GetString() const;
+
+ const std::vector<uint32_t>& components() const { return components_; }
+
+ private:
+ std::vector<uint32_t> components_;
+};
+
+BASE_EXPORT bool operator==(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator!=(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator<(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator<=(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator>(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator>=(const Version& v1, const Version& v2);
+BASE_EXPORT std::ostream& operator<<(std::ostream& stream, const Version& v);
+
+} // namespace base
+
+#endif // BASE_VERSION_H_
diff --git a/security/sandbox/chromium/base/win/current_module.h b/security/sandbox/chromium/base/win/current_module.h
new file mode 100644
index 0000000000..ee141db211
--- /dev/null
+++ b/security/sandbox/chromium/base/win/current_module.h
@@ -0,0 +1,17 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_CURRENT_MODULE_H_
+#define BASE_WIN_CURRENT_MODULE_H_
+
+#include <windows.h>
+
+// http://blogs.msdn.com/oldnewthing/archive/2004/10/25/247180.aspx
+extern "C" IMAGE_DOS_HEADER __ImageBase;
+
+// Returns the HMODULE of the dll the macro was expanded in.
+// Only use in cc files, not in h files.
+#define CURRENT_MODULE() reinterpret_cast<HMODULE>(&__ImageBase)
+
+#endif // BASE_WIN_CURRENT_MODULE_H_
diff --git a/security/sandbox/chromium/base/win/pe_image.cc b/security/sandbox/chromium/base/win/pe_image.cc
new file mode 100644
index 0000000000..3d52964a24
--- /dev/null
+++ b/security/sandbox/chromium/base/win/pe_image.cc
@@ -0,0 +1,652 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file implements PEImage, a generic class to manipulate PE files.
+// This file was adapted from GreenBorder's Code.
+
+#include "base/win/pe_image.h"
+
+#include <stddef.h>
+#include <set>
+#include <string>
+
+#include "base/no_destructor.h"
+#include "base/win/current_module.h"
+
+namespace base {
+namespace win {
+
+// Structure to perform imports enumerations.
+struct EnumAllImportsStorage {
+ PEImage::EnumImportsFunction callback;
+ PVOID cookie;
+};
+
+namespace {
+
+// PdbInfo Signature
+const DWORD kPdbInfoSignature = 'SDSR';
+
+// Compare two strings byte by byte on an unsigned basis.
+// if s1 == s2, return 0
+// if s1 < s2, return negative
+// if s1 > s2, return positive
+// Exception if inputs are invalid.
+int StrCmpByByte(LPCSTR s1, LPCSTR s2) {
+ while (*s1 != '\0' && *s1 == *s2) {
+ ++s1;
+ ++s2;
+ }
+
+ return (*reinterpret_cast<const unsigned char*>(s1) -
+ *reinterpret_cast<const unsigned char*>(s2));
+}
+
+struct PdbInfo {
+ DWORD Signature;
+ GUID Guid;
+ DWORD Age;
+ char PdbFileName[1];
+};
+
+#define LDR_IS_DATAFILE(handle) (((ULONG_PTR)(handle)) & (ULONG_PTR)1)
+#define LDR_IS_IMAGEMAPPING(handle) (((ULONG_PTR)(handle)) & (ULONG_PTR)2)
+#define LDR_IS_RESOURCE(handle) \
+ (LDR_IS_IMAGEMAPPING(handle) || LDR_IS_DATAFILE(handle))
+
+} // namespace
+
+// Callback used to enumerate imports. See EnumImportChunksFunction.
+bool ProcessImportChunk(const PEImage& image,
+ LPCSTR module,
+ PIMAGE_THUNK_DATA name_table,
+ PIMAGE_THUNK_DATA iat,
+ PVOID cookie) {
+ EnumAllImportsStorage& storage =
+ *reinterpret_cast<EnumAllImportsStorage*>(cookie);
+
+ return image.EnumOneImportChunk(storage.callback, module, name_table, iat,
+ storage.cookie);
+}
+
+// Callback used to enumerate delay imports. See EnumDelayImportChunksFunction.
+bool ProcessDelayImportChunk(const PEImage& image,
+ PImgDelayDescr delay_descriptor,
+ LPCSTR module,
+ PIMAGE_THUNK_DATA name_table,
+ PIMAGE_THUNK_DATA iat,
+ PVOID cookie) {
+ EnumAllImportsStorage& storage =
+ *reinterpret_cast<EnumAllImportsStorage*>(cookie);
+
+ return image.EnumOneDelayImportChunk(storage.callback, delay_descriptor,
+ module, name_table, iat, storage.cookie);
+}
+
+void PEImage::set_module(HMODULE module) {
+ module_ = module;
+}
+
+PIMAGE_DOS_HEADER PEImage::GetDosHeader() const {
+ return reinterpret_cast<PIMAGE_DOS_HEADER>(module_);
+}
+
+PIMAGE_NT_HEADERS PEImage::GetNTHeaders() const {
+ PIMAGE_DOS_HEADER dos_header = GetDosHeader();
+
+ return reinterpret_cast<PIMAGE_NT_HEADERS>(
+ reinterpret_cast<char*>(dos_header) + dos_header->e_lfanew);
+}
+
+PIMAGE_SECTION_HEADER PEImage::GetSectionHeader(UINT section) const {
+ PIMAGE_NT_HEADERS nt_headers = GetNTHeaders();
+ PIMAGE_SECTION_HEADER first_section = IMAGE_FIRST_SECTION(nt_headers);
+
+ if (section < nt_headers->FileHeader.NumberOfSections)
+ return first_section + section;
+ else
+ return nullptr;
+}
+
+WORD PEImage::GetNumSections() const {
+ return GetNTHeaders()->FileHeader.NumberOfSections;
+}
+
+DWORD PEImage::GetImageDirectoryEntrySize(UINT directory) const {
+ const IMAGE_DATA_DIRECTORY* const entry = GetDataDirectory(directory);
+ return entry ? entry->Size : 0;
+}
+
+PVOID PEImage::GetImageDirectoryEntryAddr(UINT directory) const {
+ const IMAGE_DATA_DIRECTORY* const entry = GetDataDirectory(directory);
+ return entry ? RVAToAddr(entry->VirtualAddress) : nullptr;
+}
+
+PIMAGE_SECTION_HEADER PEImage::GetImageSectionFromAddr(PVOID address) const {
+ PBYTE target = reinterpret_cast<PBYTE>(address);
+ PIMAGE_SECTION_HEADER section;
+
+ for (UINT i = 0; nullptr != (section = GetSectionHeader(i)); i++) {
+ // Don't use the virtual RVAToAddr.
+ PBYTE start =
+ reinterpret_cast<PBYTE>(PEImage::RVAToAddr(section->VirtualAddress));
+
+ DWORD size = section->Misc.VirtualSize;
+
+ if ((start <= target) && (start + size > target))
+ return section;
+ }
+
+ return nullptr;
+}
+
+PIMAGE_SECTION_HEADER PEImage::GetImageSectionHeaderByName(
+ LPCSTR section_name) const {
+ if (nullptr == section_name)
+ return nullptr;
+
+ PIMAGE_SECTION_HEADER ret = nullptr;
+ int num_sections = GetNumSections();
+
+ for (int i = 0; i < num_sections; i++) {
+ PIMAGE_SECTION_HEADER section = GetSectionHeader(i);
+ if (_strnicmp(reinterpret_cast<LPCSTR>(section->Name), section_name,
+ sizeof(section->Name)) == 0) {
+ ret = section;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+bool PEImage::GetDebugId(LPGUID guid,
+ LPDWORD age,
+ LPCSTR* pdb_filename,
+ size_t* pdb_filename_length) const {
+ DWORD debug_directory_size =
+ GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_DEBUG);
+ PIMAGE_DEBUG_DIRECTORY debug_directory =
+ reinterpret_cast<PIMAGE_DEBUG_DIRECTORY>(
+ GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_DEBUG));
+ if (!debug_directory)
+ return false;
+
+ size_t directory_count = debug_directory_size / sizeof(IMAGE_DEBUG_DIRECTORY);
+ for (size_t index = 0; index < directory_count; ++index) {
+ const IMAGE_DEBUG_DIRECTORY& entry = debug_directory[index];
+ if (entry.Type != IMAGE_DEBUG_TYPE_CODEVIEW)
+ continue; // Unsupported debugging info format.
+ if (entry.SizeOfData < sizeof(PdbInfo))
+ continue; // The data is too small to hold PDB info.
+ const PdbInfo* pdb_info =
+ reinterpret_cast<const PdbInfo*>(RVAToAddr(entry.AddressOfRawData));
+ if (!pdb_info)
+ continue; // The data is not present in a mapped section.
+ if (pdb_info->Signature != kPdbInfoSignature)
+ continue; // Unsupported PdbInfo signature
+
+ if (guid)
+ *guid = pdb_info->Guid;
+ if (age)
+ *age = pdb_info->Age;
+ if (pdb_filename) {
+ const size_t length_max =
+ entry.SizeOfData - offsetof(PdbInfo, PdbFileName);
+ const char* eos = pdb_info->PdbFileName;
+ for (const char* const end = pdb_info->PdbFileName + length_max;
+ eos < end && *eos; ++eos)
+ ;
+ *pdb_filename_length = eos - pdb_info->PdbFileName;
+ *pdb_filename = pdb_info->PdbFileName;
+ }
+ return true;
+ }
+ return false;
+}
+
+PDWORD PEImage::GetExportEntry(LPCSTR name) const {
+ PIMAGE_EXPORT_DIRECTORY exports = GetExportDirectory();
+
+ if (nullptr == exports)
+ return nullptr;
+
+ WORD ordinal = 0;
+ if (!GetProcOrdinal(name, &ordinal))
+ return nullptr;
+
+ PDWORD functions =
+ reinterpret_cast<PDWORD>(RVAToAddr(exports->AddressOfFunctions));
+
+ return functions + ordinal - exports->Base;
+}
+
+FARPROC PEImage::GetProcAddress(LPCSTR function_name) const {
+ PDWORD export_entry = GetExportEntry(function_name);
+ if (nullptr == export_entry)
+ return nullptr;
+
+ PBYTE function = reinterpret_cast<PBYTE>(RVAToAddr(*export_entry));
+
+ PBYTE exports = reinterpret_cast<PBYTE>(
+ GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_EXPORT));
+ DWORD size = GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_EXPORT);
+ if (!exports || !size)
+ return nullptr;
+
+ // Check for forwarded exports as a special case.
+ if (exports <= function && exports + size > function)
+ return reinterpret_cast<FARPROC>(-1);
+
+ return reinterpret_cast<FARPROC>(function);
+}
+
+bool PEImage::GetProcOrdinal(LPCSTR function_name, WORD* ordinal) const {
+ if (nullptr == ordinal)
+ return false;
+
+ PIMAGE_EXPORT_DIRECTORY exports = GetExportDirectory();
+
+ if (nullptr == exports)
+ return false;
+
+ if (IsOrdinal(function_name)) {
+ *ordinal = ToOrdinal(function_name);
+ } else {
+ PDWORD names = reinterpret_cast<PDWORD>(RVAToAddr(exports->AddressOfNames));
+ PDWORD lower = names;
+ PDWORD upper = names + exports->NumberOfNames;
+ int cmp = -1;
+
+ // Binary Search for the name.
+ while (lower != upper) {
+ PDWORD middle = lower + (upper - lower) / 2;
+ LPCSTR name = reinterpret_cast<LPCSTR>(RVAToAddr(*middle));
+
+ // This may be called by sandbox before MSVCRT dll loads, so can't use
+ // CRT function here.
+ cmp = StrCmpByByte(function_name, name);
+
+ if (cmp == 0) {
+ lower = middle;
+ break;
+ }
+
+ if (cmp > 0)
+ lower = middle + 1;
+ else
+ upper = middle;
+ }
+
+ if (cmp != 0)
+ return false;
+
+ PWORD ordinals =
+ reinterpret_cast<PWORD>(RVAToAddr(exports->AddressOfNameOrdinals));
+
+ *ordinal = ordinals[lower - names] + static_cast<WORD>(exports->Base);
+ }
+
+ return true;
+}
+
+bool PEImage::EnumSections(EnumSectionsFunction callback, PVOID cookie) const {
+ PIMAGE_NT_HEADERS nt_headers = GetNTHeaders();
+ UINT num_sections = nt_headers->FileHeader.NumberOfSections;
+ PIMAGE_SECTION_HEADER section = GetSectionHeader(0);
+
+ for (UINT i = 0; i < num_sections; i++, section++) {
+ PVOID section_start = RVAToAddr(section->VirtualAddress);
+ DWORD size = section->Misc.VirtualSize;
+
+ if (!callback(*this, section, section_start, size, cookie))
+ return false;
+ }
+
+ return true;
+}
+
+bool PEImage::EnumExports(EnumExportsFunction callback, PVOID cookie) const {
+ PVOID directory = GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_EXPORT);
+ DWORD size = GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_EXPORT);
+
+ // Check if there are any exports at all.
+ if (!directory || !size)
+ return true;
+
+ PIMAGE_EXPORT_DIRECTORY exports =
+ reinterpret_cast<PIMAGE_EXPORT_DIRECTORY>(directory);
+ UINT ordinal_base = exports->Base;
+ UINT num_funcs = exports->NumberOfFunctions;
+ UINT num_names = exports->NumberOfNames;
+ PDWORD functions =
+ reinterpret_cast<PDWORD>(RVAToAddr(exports->AddressOfFunctions));
+ PDWORD names = reinterpret_cast<PDWORD>(RVAToAddr(exports->AddressOfNames));
+ PWORD ordinals =
+ reinterpret_cast<PWORD>(RVAToAddr(exports->AddressOfNameOrdinals));
+
+ for (UINT count = 0; count < num_funcs; count++) {
+ PVOID func = RVAToAddr(functions[count]);
+ if (nullptr == func)
+ continue;
+
+ // Check for a name.
+ LPCSTR name = nullptr;
+ UINT hint;
+ for (hint = 0; hint < num_names; hint++) {
+ if (ordinals[hint] == count) {
+ name = reinterpret_cast<LPCSTR>(RVAToAddr(names[hint]));
+ break;
+ }
+ }
+
+ if (name == nullptr)
+ hint = 0;
+
+ // Check for forwarded exports.
+ LPCSTR forward = nullptr;
+ if (reinterpret_cast<char*>(func) >= reinterpret_cast<char*>(directory) &&
+ reinterpret_cast<char*>(func) <=
+ reinterpret_cast<char*>(directory) + size) {
+ forward = reinterpret_cast<LPCSTR>(func);
+ func = nullptr;
+ }
+
+ if (!callback(*this, ordinal_base + count, hint, name, func, forward,
+ cookie))
+ return false;
+ }
+
+ return true;
+}
+
+bool PEImage::EnumRelocs(EnumRelocsFunction callback, PVOID cookie) const {
+ PVOID directory = GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_BASERELOC);
+ DWORD size = GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_BASERELOC);
+
+ if (!directory || !size)
+ return true;
+
+ PIMAGE_BASE_RELOCATION base =
+ reinterpret_cast<PIMAGE_BASE_RELOCATION>(directory);
+ while (size >= sizeof(IMAGE_BASE_RELOCATION) && base->SizeOfBlock &&
+ size >= base->SizeOfBlock) {
+ PWORD reloc = reinterpret_cast<PWORD>(base + 1);
+ UINT num_relocs =
+ (base->SizeOfBlock - sizeof(IMAGE_BASE_RELOCATION)) / sizeof(WORD);
+
+ for (UINT i = 0; i < num_relocs; i++, reloc++) {
+ WORD type = *reloc >> 12;
+ PVOID address = RVAToAddr(base->VirtualAddress + (*reloc & 0x0FFF));
+
+ if (!callback(*this, type, address, cookie))
+ return false;
+ }
+
+ size -= base->SizeOfBlock;
+ base = reinterpret_cast<PIMAGE_BASE_RELOCATION>(
+ reinterpret_cast<char*>(base) + base->SizeOfBlock);
+ }
+
+ return true;
+}
+
+bool PEImage::EnumImportChunks(EnumImportChunksFunction callback,
+ PVOID cookie,
+ LPCSTR target_module_name) const {
+ DWORD size = GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_IMPORT);
+ PIMAGE_IMPORT_DESCRIPTOR import = GetFirstImportChunk();
+
+ if (import == nullptr || size < sizeof(IMAGE_IMPORT_DESCRIPTOR))
+ return true;
+
+ for (; import->FirstThunk; import++) {
+ LPCSTR module_name = reinterpret_cast<LPCSTR>(RVAToAddr(import->Name));
+ PIMAGE_THUNK_DATA name_table = reinterpret_cast<PIMAGE_THUNK_DATA>(
+ RVAToAddr(import->OriginalFirstThunk));
+ PIMAGE_THUNK_DATA iat =
+ reinterpret_cast<PIMAGE_THUNK_DATA>(RVAToAddr(import->FirstThunk));
+
+ if (target_module_name == nullptr ||
+ (lstrcmpiA(module_name, target_module_name) == 0)) {
+ if (!callback(*this, module_name, name_table, iat, cookie))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool PEImage::EnumOneImportChunk(EnumImportsFunction callback,
+ LPCSTR module_name,
+ PIMAGE_THUNK_DATA name_table,
+ PIMAGE_THUNK_DATA iat,
+ PVOID cookie) const {
+ if (nullptr == name_table)
+ return false;
+
+ for (; name_table && name_table->u1.Ordinal; name_table++, iat++) {
+ LPCSTR name = nullptr;
+ WORD ordinal = 0;
+ WORD hint = 0;
+
+ if (IMAGE_SNAP_BY_ORDINAL(name_table->u1.Ordinal)) {
+ ordinal = static_cast<WORD>(IMAGE_ORDINAL32(name_table->u1.Ordinal));
+ } else {
+ PIMAGE_IMPORT_BY_NAME import = reinterpret_cast<PIMAGE_IMPORT_BY_NAME>(
+ RVAToAddr(name_table->u1.ForwarderString));
+
+ hint = import->Hint;
+ name = reinterpret_cast<LPCSTR>(&import->Name);
+ }
+
+ if (!callback(*this, module_name, ordinal, name, hint, iat, cookie))
+ return false;
+ }
+
+ return true;
+}
+
+bool PEImage::EnumAllImports(EnumImportsFunction callback,
+ PVOID cookie,
+ LPCSTR target_module_name) const {
+ EnumAllImportsStorage temp = {callback, cookie};
+ return EnumImportChunks(ProcessImportChunk, &temp, target_module_name);
+}
+
+bool PEImage::EnumDelayImportChunks(EnumDelayImportChunksFunction callback,
+ PVOID cookie,
+ LPCSTR target_module_name) const {
+ PVOID directory =
+ GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT);
+ DWORD size = GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT);
+
+ if (!directory || !size)
+ return true;
+
+ PImgDelayDescr delay_descriptor = reinterpret_cast<PImgDelayDescr>(directory);
+ for (; delay_descriptor->rvaHmod; delay_descriptor++) {
+ PIMAGE_THUNK_DATA name_table;
+ PIMAGE_THUNK_DATA iat;
+ LPCSTR module_name;
+
+ // check if VC7-style imports, using RVAs instead of
+ // VC6-style addresses.
+ bool rvas = (delay_descriptor->grAttrs & dlattrRva) != 0;
+
+ if (rvas) {
+ module_name =
+ reinterpret_cast<LPCSTR>(RVAToAddr(delay_descriptor->rvaDLLName));
+ name_table = reinterpret_cast<PIMAGE_THUNK_DATA>(
+ RVAToAddr(delay_descriptor->rvaINT));
+ iat = reinterpret_cast<PIMAGE_THUNK_DATA>(
+ RVAToAddr(delay_descriptor->rvaIAT));
+ } else {
+ // Values in IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT are 32-bit, even on 64-bit
+ // platforms. See section 4.8 of PECOFF image spec rev 8.3.
+ module_name = reinterpret_cast<LPCSTR>(
+ static_cast<uintptr_t>(delay_descriptor->rvaDLLName));
+ name_table = reinterpret_cast<PIMAGE_THUNK_DATA>(
+ static_cast<uintptr_t>(delay_descriptor->rvaINT));
+ iat = reinterpret_cast<PIMAGE_THUNK_DATA>(
+ static_cast<uintptr_t>(delay_descriptor->rvaIAT));
+ }
+
+ if (target_module_name == nullptr ||
+ (lstrcmpiA(module_name, target_module_name) == 0)) {
+ if (target_module_name) {
+ // Ensure all imports are properly loaded for the target module so that
+ // the callback is operating on a fully-realized set of imports.
+ // This call only loads the imports for the module where this code is
+ // executing, so it is only helpful or meaningful to do this if the
+ // current module is the module whose IAT we are enumerating.
+ // Use the module_name as retrieved from the IAT because this method
+ // is case sensitive.
+ if (module_ == CURRENT_MODULE() && !LDR_IS_RESOURCE(module_)) {
+ static base::NoDestructor<std::set<std::string>> loaded_dlls;
+ // pair.second is true if this is a new element
+ if (loaded_dlls.get()->emplace(module_name).second)
+ ::__HrLoadAllImportsForDll(module_name);
+ }
+ }
+
+ if (!callback(*this, delay_descriptor, module_name, name_table, iat,
+ cookie))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool PEImage::EnumOneDelayImportChunk(EnumImportsFunction callback,
+ PImgDelayDescr delay_descriptor,
+ LPCSTR module_name,
+ PIMAGE_THUNK_DATA name_table,
+ PIMAGE_THUNK_DATA iat,
+ PVOID cookie) const {
+ for (; name_table->u1.Ordinal; name_table++, iat++) {
+ LPCSTR name = nullptr;
+ WORD ordinal = 0;
+ WORD hint = 0;
+
+ if (IMAGE_SNAP_BY_ORDINAL(name_table->u1.Ordinal)) {
+ ordinal = static_cast<WORD>(IMAGE_ORDINAL32(name_table->u1.Ordinal));
+ } else {
+ PIMAGE_IMPORT_BY_NAME import;
+ bool rvas = (delay_descriptor->grAttrs & dlattrRva) != 0;
+
+ if (rvas) {
+ import = reinterpret_cast<PIMAGE_IMPORT_BY_NAME>(
+ RVAToAddr(name_table->u1.ForwarderString));
+ } else {
+ import = reinterpret_cast<PIMAGE_IMPORT_BY_NAME>(
+ name_table->u1.ForwarderString);
+ }
+
+ hint = import->Hint;
+ name = reinterpret_cast<LPCSTR>(&import->Name);
+ }
+
+ if (!callback(*this, module_name, ordinal, name, hint, iat, cookie))
+ return false;
+ }
+
+ return true;
+}
+
+bool PEImage::EnumAllDelayImports(EnumImportsFunction callback,
+ PVOID cookie,
+ LPCSTR target_module_name) const {
+ EnumAllImportsStorage temp = {callback, cookie};
+ return EnumDelayImportChunks(ProcessDelayImportChunk, &temp,
+ target_module_name);
+}
+
+bool PEImage::VerifyMagic() const {
+ PIMAGE_DOS_HEADER dos_header = GetDosHeader();
+
+ if (dos_header->e_magic != IMAGE_DOS_SIGNATURE)
+ return false;
+
+ PIMAGE_NT_HEADERS nt_headers = GetNTHeaders();
+
+ if (nt_headers->Signature != IMAGE_NT_SIGNATURE)
+ return false;
+
+ if (nt_headers->FileHeader.SizeOfOptionalHeader !=
+ sizeof(IMAGE_OPTIONAL_HEADER))
+ return false;
+
+ if (nt_headers->OptionalHeader.Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
+ return false;
+
+ return true;
+}
+
+bool PEImage::ImageRVAToOnDiskOffset(uintptr_t rva,
+ DWORD* on_disk_offset) const {
+ LPVOID address = RVAToAddr(rva);
+ return ImageAddrToOnDiskOffset(address, on_disk_offset);
+}
+
+bool PEImage::ImageAddrToOnDiskOffset(LPVOID address,
+ DWORD* on_disk_offset) const {
+ if (nullptr == address)
+ return false;
+
+ // Get the section that this address belongs to.
+ PIMAGE_SECTION_HEADER section_header = GetImageSectionFromAddr(address);
+ if (nullptr == section_header)
+ return false;
+
+ // Don't follow the virtual RVAToAddr, use the one on the base.
+ DWORD offset_within_section =
+ static_cast<DWORD>(reinterpret_cast<uintptr_t>(address)) -
+ static_cast<DWORD>(reinterpret_cast<uintptr_t>(
+ PEImage::RVAToAddr(section_header->VirtualAddress)));
+
+ *on_disk_offset = section_header->PointerToRawData + offset_within_section;
+ return true;
+}
+
+PVOID PEImage::RVAToAddr(uintptr_t rva) const {
+ if (rva == 0)
+ return nullptr;
+
+ return reinterpret_cast<char*>(module_) + rva;
+}
+
+const IMAGE_DATA_DIRECTORY* PEImage::GetDataDirectory(UINT directory) const {
+ PIMAGE_NT_HEADERS nt_headers = GetNTHeaders();
+
+ // Does the image report that it includes this directory entry?
+ if (directory >= nt_headers->OptionalHeader.NumberOfRvaAndSizes)
+ return nullptr;
+
+ // Is there space for this directory entry in the optional header?
+ if (nt_headers->FileHeader.SizeOfOptionalHeader <
+ (offsetof(IMAGE_OPTIONAL_HEADER, DataDirectory) +
+ (directory + 1) * sizeof(IMAGE_DATA_DIRECTORY))) {
+ return nullptr;
+ }
+
+ return &nt_headers->OptionalHeader.DataDirectory[directory];
+}
+
+PVOID PEImageAsData::RVAToAddr(uintptr_t rva) const {
+ if (rva == 0)
+ return nullptr;
+
+ PVOID in_memory = PEImage::RVAToAddr(rva);
+ DWORD disk_offset;
+
+ if (!ImageAddrToOnDiskOffset(in_memory, &disk_offset))
+ return nullptr;
+
+ return PEImage::RVAToAddr(disk_offset);
+}
+
+} // namespace win
+} // namespace base
diff --git a/security/sandbox/chromium/base/win/pe_image.h b/security/sandbox/chromium/base/win/pe_image.h
new file mode 100644
index 0000000000..cec959a940
--- /dev/null
+++ b/security/sandbox/chromium/base/win/pe_image.h
@@ -0,0 +1,308 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file was adapted from GreenBorder's Code.
+// To understand what this class is about (for other than well known functions
+// as GetProcAddress), a good starting point is "An In-Depth Look into the
+// Win32 Portable Executable File Format" by Matt Pietrek:
+// http://msdn.microsoft.com/msdnmag/issues/02/02/PE/default.aspx
+
+#ifndef BASE_WIN_PE_IMAGE_H_
+#define BASE_WIN_PE_IMAGE_H_
+
+#include <windows.h>
+
+#include <stdint.h>
+
+#if defined(_WIN32_WINNT_WIN8)
+// The Windows 8 SDK defines FACILITY_VISUALCPP in winerror.h.
+#undef FACILITY_VISUALCPP
+#endif
+#include <delayimp.h>
+
+namespace base {
+namespace win {
+
+// This class is a wrapper for the Portable Executable File Format (PE).
+// Its main purpose is to provide an easy way to work with imports and exports
+// from a file, mapped in memory as image.
+class PEImage {
+ public:
+ // Callback to enumerate sections.
+ // cookie is the value passed to the enumerate method.
+ // Returns true to continue the enumeration.
+ using EnumSectionsFunction =
+ bool (*)(const PEImage&, PIMAGE_SECTION_HEADER, PVOID, DWORD, PVOID);
+
+ // Callback to enumerate exports.
+ // function is the actual address of the symbol. If forward is not null, it
+ // contains the dll and symbol to forward this export to. cookie is the value
+ // passed to the enumerate method.
+ // Returns true to continue the enumeration.
+ using EnumExportsFunction =
+ bool (*)(const PEImage&, DWORD, DWORD, LPCSTR, PVOID, LPCSTR, PVOID);
+
+ // Callback to enumerate import blocks.
+ // name_table and iat point to the imports name table and address table for
+ // this block. cookie is the value passed to the enumerate method.
+ // Returns true to continue the enumeration.
+ using EnumImportChunksFunction = bool (*)(const PEImage&,
+ LPCSTR,
+ PIMAGE_THUNK_DATA,
+ PIMAGE_THUNK_DATA,
+ PVOID);
+
+ // Callback to enumerate imports.
+ // module is the dll that exports this symbol. cookie is the value passed to
+ // the enumerate method.
+ // Returns true to continue the enumeration.
+ using EnumImportsFunction = bool (*)(const PEImage&,
+ LPCSTR,
+ DWORD,
+ LPCSTR,
+ DWORD,
+ PIMAGE_THUNK_DATA,
+ PVOID);
+
+ // Callback to enumerate delayed import blocks.
+ // module is the dll that exports this block of symbols. cookie is the value
+ // passed to the enumerate method.
+ // Returns true to continue the enumeration.
+ using EnumDelayImportChunksFunction = bool (*)(const PEImage&,
+ PImgDelayDescr,
+ LPCSTR,
+ PIMAGE_THUNK_DATA,
+ PIMAGE_THUNK_DATA,
+ PVOID);
+
+ // Callback to enumerate relocations.
+ // cookie is the value passed to the enumerate method.
+ // Returns true to continue the enumeration.
+ using EnumRelocsFunction = bool (*)(const PEImage&, WORD, PVOID, PVOID);
+
+ explicit PEImage(HMODULE module) : module_(module) {}
+ explicit PEImage(const void* module) {
+ module_ = reinterpret_cast<HMODULE>(const_cast<void*>(module));
+ }
+
+ virtual ~PEImage() = default;
+
+ // Gets the HMODULE for this object.
+ HMODULE module() const;
+
+ // Sets this object's HMODULE.
+ void set_module(HMODULE module);
+
+ // Checks if this symbol is actually an ordinal.
+ static bool IsOrdinal(LPCSTR name);
+
+ // Converts a named symbol to the corresponding ordinal.
+ static WORD ToOrdinal(LPCSTR name);
+
+ // Returns the DOS_HEADER for this PE.
+ PIMAGE_DOS_HEADER GetDosHeader() const;
+
+ // Returns the NT_HEADER for this PE.
+ PIMAGE_NT_HEADERS GetNTHeaders() const;
+
+ // Returns number of sections of this PE.
+ WORD GetNumSections() const;
+
+ // Returns the header for a given section.
+ // returns NULL if there is no such section.
+ PIMAGE_SECTION_HEADER GetSectionHeader(UINT section) const;
+
+ // Returns the size of a given directory entry or 0 if |directory| is out of
+ // bounds.
+ DWORD GetImageDirectoryEntrySize(UINT directory) const;
+
+ // Returns the address of a given directory entry or NULL if |directory| is
+ // out of bounds.
+ PVOID GetImageDirectoryEntryAddr(UINT directory) const;
+
+ // Returns the section header for a given address.
+ // Use: s = image.GetImageSectionFromAddr(a);
+ // Post: 's' is the section header of the section that contains 'a'
+ // or NULL if there is no such section.
+ PIMAGE_SECTION_HEADER GetImageSectionFromAddr(PVOID address) const;
+
+ // Returns the section header for a given section.
+ PIMAGE_SECTION_HEADER GetImageSectionHeaderByName(LPCSTR section_name) const;
+
+ // Returns the first block of imports.
+ PIMAGE_IMPORT_DESCRIPTOR GetFirstImportChunk() const;
+
+ // Returns the exports directory.
+ PIMAGE_EXPORT_DIRECTORY GetExportDirectory() const;
+
+ // Retrieves the contents of the image's CodeView debug entry, returning true
+ // if such an entry is found and is within a section mapped into the current
+ // process's memory. |guid|, |age|, and |pdb_filename| are each optional and
+ // may be NULL. |pdb_filename_length| is mandatory if |pdb_filename| is not
+ // NULL, as the latter is populated with a direct reference to a string in the
+ // image that is not guaranteed to be terminated (note: informal
+ // documentation indicates that it should be terminated, but the data is
+ // untrusted). Furthermore, owing to its nature of being a string in the
+ // image, it is only valid while the image is mapped into the process, and the
+ // caller is not responsible for freeing it. |pdb_filename_length| is
+ // populated with the string length of |pdb_filename| (not including a
+ // terminator) and must be used rather than relying on |pdb_filename| being
+ // properly terminated.
+ bool GetDebugId(LPGUID guid,
+ LPDWORD age,
+ LPCSTR* pdb_filename,
+ size_t* pdb_filename_length) const;
+
+ // Returns a given export entry.
+ // Use: e = image.GetExportEntry(f);
+ // Pre: 'f' is either a zero terminated string or ordinal
+ // Post: 'e' is a pointer to the export directory entry
+ // that contains 'f's export RVA, or NULL if 'f'
+ // is not exported from this image
+ PDWORD GetExportEntry(LPCSTR name) const;
+
+ // Returns the address for a given exported symbol.
+ // Use: p = image.GetProcAddress(f);
+ // Pre: 'f' is either a zero terminated string or ordinal.
+ // Post: if 'f' is a non-forwarded export from image, 'p' is
+ // the exported function. If 'f' is a forwarded export
+ // then p is the special value -1. In this case
+ // RVAToAddr(*GetExportEntry) can be used to resolve
+ // the string that describes the forward.
+ FARPROC GetProcAddress(LPCSTR function_name) const;
+
+ // Retrieves the ordinal for a given exported symbol.
+ // Returns true if the symbol was found.
+ bool GetProcOrdinal(LPCSTR function_name, WORD* ordinal) const;
+
+ // Enumerates PE sections.
+ // cookie is a generic cookie to pass to the callback.
+ // Returns true on success.
+ bool EnumSections(EnumSectionsFunction callback, PVOID cookie) const;
+
+ // Enumerates PE exports.
+ // cookie is a generic cookie to pass to the callback.
+ // Returns true on success.
+ bool EnumExports(EnumExportsFunction callback, PVOID cookie) const;
+
+ // Enumerates PE imports.
+ // cookie is a generic cookie to pass to the callback.
+ // Returns true on success.
+ // Use |target_module_name| to ensure the callback is only invoked for the
+ // specified module.
+ bool EnumAllImports(EnumImportsFunction callback,
+ PVOID cookie,
+ LPCSTR target_module_name) const;
+
+ // Enumerates PE import blocks.
+ // cookie is a generic cookie to pass to the callback.
+ // Returns true on success.
+ // Use |target_module_name| to ensure the callback is only invoked for the
+ // specified module.
+ bool EnumImportChunks(EnumImportChunksFunction callback,
+ PVOID cookie,
+ LPCSTR target_module_name) const;
+
+ // Enumerates the imports from a single PE import block.
+ // cookie is a generic cookie to pass to the callback.
+ // Returns true on success.
+ bool EnumOneImportChunk(EnumImportsFunction callback,
+ LPCSTR module_name,
+ PIMAGE_THUNK_DATA name_table,
+ PIMAGE_THUNK_DATA iat,
+ PVOID cookie) const;
+
+ // Enumerates PE delay imports.
+ // cookie is a generic cookie to pass to the callback.
+ // Returns true on success.
+ // Use |target_module_name| to ensure the callback is only invoked for the
+ // specified module. If this parameter is non-null then all delayloaded
+ // imports are resolved when the target module is found.
+ bool EnumAllDelayImports(EnumImportsFunction callback,
+ PVOID cookie,
+ LPCSTR target_module_name) const;
+
+ // Enumerates PE delay import blocks.
+ // cookie is a generic cookie to pass to the callback.
+ // Returns true on success.
+ // Use |target_module_name| to ensure the callback is only invoked for the
+ // specified module. If this parameter is non-null then all delayloaded
+ // imports are resolved when the target module is found.
+ bool EnumDelayImportChunks(EnumDelayImportChunksFunction callback,
+ PVOID cookie,
+ LPCSTR target_module_name) const;
+
+ // Enumerates imports from a single PE delay import block.
+ // cookie is a generic cookie to pass to the callback.
+ // Returns true on success.
+ bool EnumOneDelayImportChunk(EnumImportsFunction callback,
+ PImgDelayDescr delay_descriptor,
+ LPCSTR module_name,
+ PIMAGE_THUNK_DATA name_table,
+ PIMAGE_THUNK_DATA iat,
+ PVOID cookie) const;
+
+ // Enumerates PE relocation entries.
+ // cookie is a generic cookie to pass to the callback.
+ // Returns true on success.
+ bool EnumRelocs(EnumRelocsFunction callback, PVOID cookie) const;
+
+ // Verifies the magic values on the PE file.
+ // Returns true if all values are correct.
+ bool VerifyMagic() const;
+
+ // Converts an rva value to the appropriate address.
+ virtual PVOID RVAToAddr(uintptr_t rva) const;
+
+ // Converts an rva value to an offset on disk.
+ // Returns true on success.
+ bool ImageRVAToOnDiskOffset(uintptr_t rva, DWORD* on_disk_offset) const;
+
+ // Converts an address to an offset on disk.
+ // Returns true on success.
+ bool ImageAddrToOnDiskOffset(LPVOID address, DWORD* on_disk_offset) const;
+
+ private:
+ // Returns a pointer to a data directory, or NULL if |directory| is out of
+ // range.
+ const IMAGE_DATA_DIRECTORY* GetDataDirectory(UINT directory) const;
+
+ HMODULE module_;
+};
+
+// This class is an extension to the PEImage class that allows working with PE
+// files mapped as data instead of as image file.
+class PEImageAsData : public PEImage {
+ public:
+ explicit PEImageAsData(HMODULE hModule) : PEImage(hModule) {}
+
+ PVOID RVAToAddr(uintptr_t rva) const override;
+};
+
+inline bool PEImage::IsOrdinal(LPCSTR name) {
+ return reinterpret_cast<uintptr_t>(name) <= 0xFFFF;
+}
+
+inline WORD PEImage::ToOrdinal(LPCSTR name) {
+ return static_cast<WORD>(reinterpret_cast<intptr_t>(name));
+}
+
+inline HMODULE PEImage::module() const {
+ return module_;
+}
+
+inline PIMAGE_IMPORT_DESCRIPTOR PEImage::GetFirstImportChunk() const {
+ return reinterpret_cast<PIMAGE_IMPORT_DESCRIPTOR>(
+ GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_IMPORT));
+}
+
+inline PIMAGE_EXPORT_DIRECTORY PEImage::GetExportDirectory() const {
+ return reinterpret_cast<PIMAGE_EXPORT_DIRECTORY>(
+ GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_EXPORT));
+}
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_PE_IMAGE_H_
diff --git a/security/sandbox/chromium/base/win/scoped_handle.cc b/security/sandbox/chromium/base/win/scoped_handle.cc
new file mode 100644
index 0000000000..de6854591b
--- /dev/null
+++ b/security/sandbox/chromium/base/win/scoped_handle.cc
@@ -0,0 +1,44 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_handle.h"
+#include "base/win/scoped_handle_verifier.h"
+#include "base/win/windows_types.h"
+
+namespace base {
+namespace win {
+
+using base::win::internal::ScopedHandleVerifier;
+
+// Static.
+bool HandleTraits::CloseHandle(HANDLE handle) {
+ return ScopedHandleVerifier::Get()->CloseHandle(handle);
+}
+
+// Static.
+void VerifierTraits::StartTracking(HANDLE handle,
+ const void* owner,
+ const void* pc1,
+ const void* pc2) {
+ return ScopedHandleVerifier::Get()->StartTracking(handle, owner, pc1, pc2);
+}
+
+// Static.
+void VerifierTraits::StopTracking(HANDLE handle,
+ const void* owner,
+ const void* pc1,
+ const void* pc2) {
+ return ScopedHandleVerifier::Get()->StopTracking(handle, owner, pc1, pc2);
+}
+
+void DisableHandleVerifier() {
+ return ScopedHandleVerifier::Get()->Disable();
+}
+
+void OnHandleBeingClosed(HANDLE handle) {
+ return ScopedHandleVerifier::Get()->OnHandleBeingClosed(handle);
+}
+
+} // namespace win
+} // namespace base
diff --git a/security/sandbox/chromium/base/win/scoped_handle.h b/security/sandbox/chromium/base/win/scoped_handle.h
new file mode 100644
index 0000000000..02c2533649
--- /dev/null
+++ b/security/sandbox/chromium/base/win/scoped_handle.h
@@ -0,0 +1,184 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_HANDLE_H_
+#define BASE_WIN_SCOPED_HANDLE_H_
+
+#include "base/win/windows_types.h"
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+// TODO(rvargas): remove this with the rest of the verifier.
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#define BASE_WIN_GET_CALLER _ReturnAddress()
+#elif defined(COMPILER_GCC)
+#define BASE_WIN_GET_CALLER \
+ __builtin_extract_return_addr(__builtin_return_address(0))
+#endif
+
+namespace base {
+namespace win {
+
+// Generic wrapper for raw handles that takes care of closing handles
+// automatically. The class interface follows the style of
+// the ScopedFILE class with two additions:
+// - IsValid() method can tolerate multiple invalid handle values such as NULL
+// and INVALID_HANDLE_VALUE (-1) for Win32 handles.
+// - Set() (and the constructors and assignment operators that call it)
+// preserve the Windows LastError code. This ensures that GetLastError() can
+// be called after stashing a handle in a GenericScopedHandle object. Doing
+// this explicitly is necessary because of bug 528394 and VC++ 2015.
+template <class Traits, class Verifier>
+class GenericScopedHandle {
+ public:
+ using Handle = typename Traits::Handle;
+
+ GenericScopedHandle() : handle_(Traits::NullHandle()) {}
+
+ explicit GenericScopedHandle(Handle handle) : handle_(Traits::NullHandle()) {
+ Set(handle);
+ }
+
+ GenericScopedHandle(GenericScopedHandle&& other)
+ : handle_(Traits::NullHandle()) {
+ Set(other.Take());
+ }
+
+ ~GenericScopedHandle() { Close(); }
+
+ bool IsValid() const { return Traits::IsHandleValid(handle_); }
+
+ GenericScopedHandle& operator=(GenericScopedHandle&& other) {
+ DCHECK_NE(this, &other);
+ Set(other.Take());
+ return *this;
+ }
+
+ void Set(Handle handle) {
+ if (handle_ != handle) {
+ // Preserve old LastError to avoid bug 528394.
+ auto last_error = ::GetLastError();
+ Close();
+
+ if (Traits::IsHandleValid(handle)) {
+ handle_ = handle;
+ Verifier::StartTracking(handle, this, BASE_WIN_GET_CALLER,
+ GetProgramCounter());
+ }
+ ::SetLastError(last_error);
+ }
+ }
+
+ Handle Get() const { return handle_; }
+
+ // Transfers ownership away from this object.
+ Handle Take() WARN_UNUSED_RESULT {
+ Handle temp = handle_;
+ handle_ = Traits::NullHandle();
+ if (Traits::IsHandleValid(temp)) {
+ Verifier::StopTracking(temp, this, BASE_WIN_GET_CALLER,
+ GetProgramCounter());
+ }
+ return temp;
+ }
+
+ // Explicitly closes the owned handle.
+ void Close() {
+ if (Traits::IsHandleValid(handle_)) {
+ Verifier::StopTracking(handle_, this, BASE_WIN_GET_CALLER,
+ GetProgramCounter());
+
+ Traits::CloseHandle(handle_);
+ handle_ = Traits::NullHandle();
+ }
+ }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(ScopedHandleTest, ActiveVerifierWrongOwner);
+ FRIEND_TEST_ALL_PREFIXES(ScopedHandleTest, ActiveVerifierUntrackedHandle);
+ Handle handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(GenericScopedHandle);
+};
+
+#undef BASE_WIN_GET_CALLER
+
+// The traits class for Win32 handles that can be closed via CloseHandle() API.
+class HandleTraits {
+ public:
+ using Handle = HANDLE;
+
+ // Closes the handle.
+ static bool BASE_EXPORT CloseHandle(HANDLE handle);
+
+ // Returns true if the handle value is valid.
+ static bool IsHandleValid(HANDLE handle) {
+ return handle != nullptr && handle != INVALID_HANDLE_VALUE;
+ }
+
+ // Returns NULL handle value.
+ static HANDLE NullHandle() { return nullptr; }
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HandleTraits);
+};
+
+// Do-nothing verifier.
+class DummyVerifierTraits {
+ public:
+ using Handle = HANDLE;
+
+ static void StartTracking(HANDLE handle,
+ const void* owner,
+ const void* pc1,
+ const void* pc2) {}
+ static void StopTracking(HANDLE handle,
+ const void* owner,
+ const void* pc1,
+ const void* pc2) {}
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DummyVerifierTraits);
+};
+
+// Performs actual run-time tracking.
+class BASE_EXPORT VerifierTraits {
+ public:
+ using Handle = HANDLE;
+
+ static void StartTracking(HANDLE handle,
+ const void* owner,
+ const void* pc1,
+ const void* pc2);
+ static void StopTracking(HANDLE handle,
+ const void* owner,
+ const void* pc1,
+ const void* pc2);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VerifierTraits);
+};
+
+using ScopedHandle = GenericScopedHandle<HandleTraits, VerifierTraits>;
+
+// This function may be called by the embedder to disable the use of
+// VerifierTraits at runtime. It has no effect if DummyVerifierTraits is used
+// for ScopedHandle.
+BASE_EXPORT void DisableHandleVerifier();
+
+// This should be called whenever the OS is closing a handle, if extended
+// verification of improper handle closing is desired. If |handle| is being
+// tracked by the handle verifier and ScopedHandle is not the one closing it,
+// a CHECK is generated.
+BASE_EXPORT void OnHandleBeingClosed(HANDLE handle);
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_HANDLE_H_
diff --git a/security/sandbox/chromium/base/win/scoped_handle_verifier.cc b/security/sandbox/chromium/base/win/scoped_handle_verifier.cc
new file mode 100644
index 0000000000..316606c0bf
--- /dev/null
+++ b/security/sandbox/chromium/base/win/scoped_handle_verifier.cc
@@ -0,0 +1,238 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_handle_verifier.h"
+
+#include <windows.h>
+
+#include <stddef.h>
+
+#include <unordered_map>
+
+#include "base/debug/alias.h"
+#include "base/debug/stack_trace.h"
+#include "base/synchronization/lock_impl.h"
+#include "base/win/base_win_buildflags.h"
+#include "base/win/current_module.h"
+
+extern "C" {
+__declspec(dllexport) void* GetHandleVerifier();
+
+void* GetHandleVerifier() {
+ return base::win::internal::ScopedHandleVerifier::Get();
+}
+} // extern C
+
+namespace {
+
+base::win::internal::ScopedHandleVerifier* g_active_verifier = nullptr;
+using GetHandleVerifierFn = void* (*)();
+using HandleMap =
+ std::unordered_map<HANDLE,
+ base::win::internal::ScopedHandleVerifierInfo,
+ base::win::internal::HandleHash>;
+using NativeLock = base::internal::LockImpl;
+
+NativeLock* GetLock() {
+ static auto* native_lock = new NativeLock();
+ return native_lock;
+}
+
+// Simple automatic locking using a native critical section so it supports
+// recursive locking.
+class AutoNativeLock {
+ public:
+ explicit AutoNativeLock(NativeLock& lock) : lock_(lock) { lock_.Lock(); }
+
+ ~AutoNativeLock() { lock_.Unlock(); }
+
+ private:
+ NativeLock& lock_;
+ DISALLOW_COPY_AND_ASSIGN(AutoNativeLock);
+};
+
+} // namespace
+
+namespace base {
+namespace win {
+namespace internal {
+
+ScopedHandleVerifier::ScopedHandleVerifier(bool enabled)
+ : enabled_(enabled), lock_(GetLock()) {}
+
+// static
+ScopedHandleVerifier* ScopedHandleVerifier::Get() {
+ if (!g_active_verifier)
+ ScopedHandleVerifier::InstallVerifier();
+
+ return g_active_verifier;
+}
+
+bool CloseHandleWrapper(HANDLE handle) {
+ if (!::CloseHandle(handle))
+    // Making this a DCHECK on non-Nightly builds as we are hitting it
+    // frequently; it looks like we are closing handles twice somehow. See bug 1564899.
+#if defined(NIGHTLY_BUILD)
+ CHECK(false); // CloseHandle failed.
+#else
+ DCHECK(false); // CloseHandle failed.
+#endif
+ return true;
+}
+
+// Assigns the g_active_verifier global within the GetLock() lock.
+// If |existing_verifier| is non-null then |enabled| is ignored.
+void ThreadSafeAssignOrCreateScopedHandleVerifier(
+ ScopedHandleVerifier* existing_verifier,
+ bool enabled) {
+ AutoNativeLock lock(*GetLock());
+ // Another thread in this module might be trying to assign the global
+ // verifier, so check that within the lock here.
+ if (g_active_verifier)
+ return;
+ g_active_verifier =
+ existing_verifier ? existing_verifier : new ScopedHandleVerifier(enabled);
+}
+
+// static
+void ScopedHandleVerifier::InstallVerifier() {
+#if BUILDFLAG(SINGLE_MODULE_MODE_HANDLE_VERIFIER)
+ // Component build has one Active Verifier per module.
+ ThreadSafeAssignOrCreateScopedHandleVerifier(nullptr, true);
+#else
+ // If you are reading this, wondering why your process seems deadlocked, take
+ // a look at your DllMain code and remove things that should not be done
+ // there, like doing whatever gave you that nice windows handle you are trying
+ // to store in a ScopedHandle.
+ HMODULE main_module = ::GetModuleHandle(NULL);
+ GetHandleVerifierFn get_handle_verifier =
+ reinterpret_cast<GetHandleVerifierFn>(
+ ::GetProcAddress(main_module, "GetHandleVerifier"));
+
+  // This should only happen if a DLL that is linked with base is hosted by an
+  // EXE that is not. In this case, create a ScopedHandleVerifier for the
+  // current module,
+  // but leave it disabled.
+ if (!get_handle_verifier) {
+ ThreadSafeAssignOrCreateScopedHandleVerifier(nullptr, false);
+ return;
+ }
+
+ // Check if in the main module.
+ if (get_handle_verifier == GetHandleVerifier) {
+ ThreadSafeAssignOrCreateScopedHandleVerifier(nullptr, true);
+ return;
+ }
+
+ ScopedHandleVerifier* main_module_verifier =
+ reinterpret_cast<ScopedHandleVerifier*>(get_handle_verifier());
+
+  // The main module should always create a verifier on demand.
+ DCHECK(main_module_verifier);
+
+ ThreadSafeAssignOrCreateScopedHandleVerifier(main_module_verifier, false);
+#endif
+}
+
+bool ScopedHandleVerifier::CloseHandle(HANDLE handle) {
+ if (!enabled_)
+ return CloseHandleWrapper(handle);
+
+ closing_.Set(true);
+ CloseHandleWrapper(handle);
+ closing_.Set(false);
+
+ return true;
+}
+
+// static
+NativeLock* ScopedHandleVerifier::GetLock() {
+ return ::GetLock();
+}
+
+void ScopedHandleVerifier::StartTracking(HANDLE handle,
+ const void* owner,
+ const void* pc1,
+ const void* pc2) {
+ if (!enabled_)
+ return;
+
+ // Grab the thread id before the lock.
+ DWORD thread_id = GetCurrentThreadId();
+
+ AutoNativeLock lock(*lock_);
+
+ ScopedHandleVerifierInfo handle_info = {owner, pc1, pc2,
+ base::debug::StackTrace(), thread_id};
+ std::pair<HANDLE, ScopedHandleVerifierInfo> item(handle, handle_info);
+ std::pair<HandleMap::iterator, bool> result = map_.insert(item);
+ if (!result.second) {
+ ScopedHandleVerifierInfo other = result.first->second;
+ base::debug::Alias(&other);
+ auto creation_stack = creation_stack_;
+ base::debug::Alias(&creation_stack);
+ CHECK(false); // Attempt to start tracking already tracked handle.
+ }
+}
+
+void ScopedHandleVerifier::StopTracking(HANDLE handle,
+ const void* owner,
+ const void* pc1,
+ const void* pc2) {
+ if (!enabled_)
+ return;
+
+ AutoNativeLock lock(*lock_);
+ HandleMap::iterator i = map_.find(handle);
+ if (i == map_.end()) {
+ auto creation_stack = creation_stack_;
+ base::debug::Alias(&creation_stack);
+ CHECK(false); // Attempting to close an untracked handle.
+ }
+
+ ScopedHandleVerifierInfo other = i->second;
+ if (other.owner != owner) {
+ base::debug::Alias(&other);
+ auto creation_stack = creation_stack_;
+ base::debug::Alias(&creation_stack);
+ CHECK(false); // Attempting to close a handle not owned by opener.
+ }
+
+ map_.erase(i);
+}
+
+void ScopedHandleVerifier::Disable() {
+ enabled_ = false;
+}
+
+void ScopedHandleVerifier::OnHandleBeingClosed(HANDLE handle) {
+ if (!enabled_)
+ return;
+
+ if (closing_.Get())
+ return;
+
+ AutoNativeLock lock(*lock_);
+ HandleMap::iterator i = map_.find(handle);
+ if (i == map_.end())
+ return;
+
+ ScopedHandleVerifierInfo other = i->second;
+ base::debug::Alias(&other);
+ auto creation_stack = creation_stack_;
+ base::debug::Alias(&creation_stack);
+ CHECK(false); // CloseHandle called on tracked handle.
+}
+
+HMODULE ScopedHandleVerifier::GetModule() const {
+ return CURRENT_MODULE();
+}
+
+HMODULE GetHandleVerifierModuleForTesting() {
+ return g_active_verifier->GetModule();
+}
+
+} // namespace internal
+} // namespace win
+} // namespace base
diff --git a/security/sandbox/chromium/base/win/scoped_handle_verifier.h b/security/sandbox/chromium/base/win/scoped_handle_verifier.h
new file mode 100644
index 0000000000..596e2c47eb
--- /dev/null
+++ b/security/sandbox/chromium/base/win/scoped_handle_verifier.h
@@ -0,0 +1,88 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_HANDLE_VERIFIER_H_
+#define BASE_WIN_SCOPED_HANDLE_VERIFIER_H_
+
+#include "base/win/windows_types.h"
+
+#include <unordered_map>
+
+#include "base/base_export.h"
+#include "base/debug/stack_trace.h"
+#include "base/hash/hash.h"
+#include "base/synchronization/lock_impl.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+namespace win {
+namespace internal {
+
+struct HandleHash {
+ size_t operator()(const HANDLE& handle) const {
+ return base::FastHash(as_bytes(make_span(&handle, 1)));
+ }
+};
+
+struct ScopedHandleVerifierInfo {
+ const void* owner;
+ const void* pc1;
+ const void* pc2;
+ base::debug::StackTrace stack;
+ DWORD thread_id;
+};
+
+// Implements the actual object that is verifying handles for this process.
+// The active instance is shared across the module boundary but there is no
+// way to delete this object from the wrong side of it (or any side, actually).
+// We need [[clang::lto_visibility_public]] because instances of this class are
+// passed across module boundaries. This means different modules must have
+// compatible definitions of the class even when whole program optimization is
+// enabled - which is what this attribute accomplishes. The pragma stops MSVC
+// from emitting an unrecognized attribute warning.
+#pragma warning(push)
+#pragma warning(disable : 5030)
+class [[clang::lto_visibility_public]] ScopedHandleVerifier {
+#pragma warning(pop)
+ public:
+ explicit ScopedHandleVerifier(bool enabled);
+
+ // Retrieves the current verifier.
+ static ScopedHandleVerifier* Get();
+
+ // The methods required by HandleTraits. They are virtual because we need to
+ // forward the call execution to another module, instead of letting the
+ // compiler call the version that is linked in the current module.
+ virtual bool CloseHandle(HANDLE handle);
+ virtual void StartTracking(HANDLE handle, const void* owner, const void* pc1,
+ const void* pc2);
+ virtual void StopTracking(HANDLE handle, const void* owner, const void* pc1,
+ const void* pc2);
+ virtual void Disable();
+ virtual void OnHandleBeingClosed(HANDLE handle);
+ virtual HMODULE GetModule() const;
+
+ private:
+ ~ScopedHandleVerifier(); // Not implemented.
+
+ static base::internal::LockImpl* GetLock();
+ static void InstallVerifier();
+
+ base::debug::StackTrace creation_stack_;
+ bool enabled_;
+ base::ThreadLocalBoolean closing_;
+ base::internal::LockImpl* lock_;
+ std::unordered_map<HANDLE, ScopedHandleVerifierInfo, HandleHash> map_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedHandleVerifier);
+};
+
+// This testing function returns the module in which the concrete
+// ScopedHandleVerifier implementation was instantiated.
+BASE_EXPORT HMODULE GetHandleVerifierModuleForTesting();
+
+} // namespace internal
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_HANDLE_VERIFIER_H_
diff --git a/security/sandbox/chromium/base/win/scoped_process_information.cc b/security/sandbox/chromium/base/win/scoped_process_information.cc
new file mode 100644
index 0000000000..d3024bb5e9
--- /dev/null
+++ b/security/sandbox/chromium/base/win/scoped_process_information.cc
@@ -0,0 +1,107 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_process_information.h"
+
+#include "base/logging.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+// Duplicates source into target, returning true upon success. |target| is
+// guaranteed to be untouched in case of failure. Succeeds with no side-effects
+// if source is NULL.
+bool CheckAndDuplicateHandle(HANDLE source, ScopedHandle* target) {
+ if (!source)
+ return true;
+
+ HANDLE temp = nullptr;
+ if (!::DuplicateHandle(::GetCurrentProcess(), source, ::GetCurrentProcess(),
+ &temp, 0, FALSE, DUPLICATE_SAME_ACCESS)) {
+ DWORD last_error = ::GetLastError();
+ DPLOG(ERROR) << "Failed to duplicate a handle " << last_error;
+ ::SetLastError(last_error);
+ return false;
+ }
+ target->Set(temp);
+ return true;
+}
+
+} // namespace
+
+ScopedProcessInformation::ScopedProcessInformation() = default;
+
+ScopedProcessInformation::ScopedProcessInformation(
+ const PROCESS_INFORMATION& process_info) {
+ Set(process_info);
+}
+
+ScopedProcessInformation::~ScopedProcessInformation() {
+ Close();
+}
+
+bool ScopedProcessInformation::IsValid() const {
+ return process_id_ || process_handle_.Get() || thread_id_ ||
+ thread_handle_.Get();
+}
+
+void ScopedProcessInformation::Close() {
+ process_handle_.Close();
+ thread_handle_.Close();
+ process_id_ = 0;
+ thread_id_ = 0;
+}
+
+void ScopedProcessInformation::Set(const PROCESS_INFORMATION& process_info) {
+ if (IsValid())
+ Close();
+
+ process_handle_.Set(process_info.hProcess);
+ thread_handle_.Set(process_info.hThread);
+ process_id_ = process_info.dwProcessId;
+ thread_id_ = process_info.dwThreadId;
+}
+
+bool ScopedProcessInformation::DuplicateFrom(
+ const ScopedProcessInformation& other) {
+ DCHECK(!IsValid()) << "target ScopedProcessInformation must be NULL";
+ DCHECK(other.IsValid()) << "source ScopedProcessInformation must be valid";
+
+ if (CheckAndDuplicateHandle(other.process_handle(), &process_handle_) &&
+ CheckAndDuplicateHandle(other.thread_handle(), &thread_handle_)) {
+ process_id_ = other.process_id();
+ thread_id_ = other.thread_id();
+ return true;
+ }
+
+ return false;
+}
+
+PROCESS_INFORMATION ScopedProcessInformation::Take() {
+ PROCESS_INFORMATION process_information = {};
+ process_information.hProcess = process_handle_.Take();
+ process_information.hThread = thread_handle_.Take();
+ process_information.dwProcessId = process_id();
+ process_information.dwThreadId = thread_id();
+ process_id_ = 0;
+ thread_id_ = 0;
+
+ return process_information;
+}
+
+HANDLE ScopedProcessInformation::TakeProcessHandle() {
+ process_id_ = 0;
+ return process_handle_.Take();
+}
+
+HANDLE ScopedProcessInformation::TakeThreadHandle() {
+ thread_id_ = 0;
+ return thread_handle_.Take();
+}
+
+} // namespace win
+} // namespace base
diff --git a/security/sandbox/chromium/base/win/scoped_process_information.h b/security/sandbox/chromium/base/win/scoped_process_information.h
new file mode 100644
index 0000000000..3b85d1bfab
--- /dev/null
+++ b/security/sandbox/chromium/base/win/scoped_process_information.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_PROCESS_INFORMATION_H_
+#define BASE_WIN_SCOPED_PROCESS_INFORMATION_H_
+
+#include <windows.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+namespace win {
+
+// Manages the closing of process and thread handles from PROCESS_INFORMATION
+// structures. Allows clients to take ownership of either handle independently.
+class BASE_EXPORT ScopedProcessInformation {
+ public:
+ ScopedProcessInformation();
+ explicit ScopedProcessInformation(const PROCESS_INFORMATION& process_info);
+ ~ScopedProcessInformation();
+
+ // Returns true iff this instance is holding a thread and/or process handle.
+ bool IsValid() const;
+
+ // Closes the held thread and process handles, if any.
+ void Close();
+
+ // Populates this instance with the provided |process_info|.
+ void Set(const PROCESS_INFORMATION& process_info);
+
+ // Populates this instance with duplicate handles and the thread/process IDs
+ // from |other|. Returns false in case of failure, in which case this instance
+ // will be completely unpopulated.
+ bool DuplicateFrom(const ScopedProcessInformation& other);
+
+ // Transfers ownership of the held PROCESS_INFORMATION, if any, away from this
+ // instance.
+ PROCESS_INFORMATION Take();
+
+ // Transfers ownership of the held process handle, if any, away from this
+ // instance. Note that the related process_id will also be cleared.
+ HANDLE TakeProcessHandle();
+
+ // Transfers ownership of the held thread handle, if any, away from this
+ // instance. Note that the related thread_id will also be cleared.
+ HANDLE TakeThreadHandle();
+
+ // Returns the held process handle, if any, while retaining ownership.
+ HANDLE process_handle() const { return process_handle_.Get(); }
+
+ // Returns the held thread handle, if any, while retaining ownership.
+ HANDLE thread_handle() const { return thread_handle_.Get(); }
+
+ // Returns the held process id, if any.
+ DWORD process_id() const { return process_id_; }
+
+ // Returns the held thread id, if any.
+ DWORD thread_id() const { return thread_id_; }
+
+ private:
+ ScopedHandle process_handle_;
+ ScopedHandle thread_handle_;
+ DWORD process_id_ = 0;
+ DWORD thread_id_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedProcessInformation);
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_PROCESS_INFORMATION_H_
diff --git a/security/sandbox/chromium/base/win/startup_information.cc b/security/sandbox/chromium/base/win/startup_information.cc
new file mode 100644
index 0000000000..a78508dcad
--- /dev/null
+++ b/security/sandbox/chromium/base/win/startup_information.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/startup_information.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace win {
+
+StartupInformation::StartupInformation() : startup_info_() {
+ startup_info_.StartupInfo.cb = sizeof(startup_info_);
+}
+
+StartupInformation::~StartupInformation() {
+ if (startup_info_.lpAttributeList) {
+ ::DeleteProcThreadAttributeList(startup_info_.lpAttributeList);
+ }
+}
+
+bool StartupInformation::InitializeProcThreadAttributeList(
+ DWORD attribute_count) {
+ if (startup_info_.StartupInfo.cb != sizeof(startup_info_) ||
+ startup_info_.lpAttributeList) {
+ return false;
+ }
+
+ SIZE_T size = 0;
+ ::InitializeProcThreadAttributeList(nullptr, attribute_count, 0, &size);
+ if (size == 0)
+ return false;
+
+ auto attribute_list = std::make_unique<char[]>(size);
+ auto* attribute_list_ptr =
+ reinterpret_cast<LPPROC_THREAD_ATTRIBUTE_LIST>(attribute_list.get());
+ if (!::InitializeProcThreadAttributeList(attribute_list_ptr, attribute_count,
+ 0, &size)) {
+ return false;
+ }
+
+ attribute_list_ = std::move(attribute_list);
+ startup_info_.lpAttributeList = attribute_list_ptr;
+
+ return true;
+}
+
+bool StartupInformation::UpdateProcThreadAttribute(DWORD_PTR attribute,
+ void* value,
+ size_t size) {
+ if (!startup_info_.lpAttributeList)
+ return false;
+ return !!::UpdateProcThreadAttribute(startup_info_.lpAttributeList, 0,
+ attribute, value, size, nullptr,
+ nullptr);
+}
+
+} // namespace win
+} // namespace base
diff --git a/security/sandbox/chromium/base/win/startup_information.h b/security/sandbox/chromium/base/win/startup_information.h
new file mode 100644
index 0000000000..7ef6965dd5
--- /dev/null
+++ b/security/sandbox/chromium/base/win/startup_information.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_STARTUP_INFORMATION_H_
+#define BASE_WIN_STARTUP_INFORMATION_H_
+
+#include <windows.h>
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Manages the lifetime of additional attributes in STARTUPINFOEX.
+class BASE_EXPORT StartupInformation {
+ public:
+ StartupInformation();
+
+ ~StartupInformation();
+
+ // Initialize the attribute list for the specified number of entries.
+ bool InitializeProcThreadAttributeList(DWORD attribute_count);
+
+ // Sets one entry in the initialized attribute list.
+ // |value| needs to live at least as long as the StartupInformation object
+ // this is called on.
+ bool UpdateProcThreadAttribute(DWORD_PTR attribute, void* value, size_t size);
+
+ LPSTARTUPINFOW startup_info() { return &startup_info_.StartupInfo; }
+ LPSTARTUPINFOW startup_info() const {
+ return const_cast<const LPSTARTUPINFOW>(&startup_info_.StartupInfo);
+ }
+
+ bool has_extended_startup_info() const {
+ return !!startup_info_.lpAttributeList;
+ }
+
+ private:
+ std::unique_ptr<char[]> attribute_list_;
+ STARTUPINFOEXW startup_info_;
+ DISALLOW_COPY_AND_ASSIGN(StartupInformation);
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_STARTUP_INFORMATION_H_
diff --git a/security/sandbox/chromium/base/win/static_constants.cc b/security/sandbox/chromium/base/win/static_constants.cc
new file mode 100644
index 0000000000..f9894a22bd
--- /dev/null
+++ b/security/sandbox/chromium/base/win/static_constants.cc
@@ -0,0 +1,13 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/static_constants.h"
+
+namespace base {
+namespace win {
+
+const char kApplicationVerifierDllName[] = "verifier.dll";
+
+} // namespace win
+} // namespace base
diff --git a/security/sandbox/chromium/base/win/static_constants.h b/security/sandbox/chromium/base/win/static_constants.h
new file mode 100644
index 0000000000..98a631f7cf
--- /dev/null
+++ b/security/sandbox/chromium/base/win/static_constants.h
@@ -0,0 +1,21 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines constants needed before imports (like base.dll) are fully resolved.
+// For example, constants defined here can be used by interceptions (i.e. hooks)
+// in the sandbox, which run before imports are resolved, and can therefore only
+// reference static variables.
+
+#ifndef BASE_WIN_STATIC_CONSTANTS_H_
+#define BASE_WIN_STATIC_CONSTANTS_H_
+
+namespace base {
+namespace win {
+
+extern const char kApplicationVerifierDllName[];
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_STATIC_CONSTANTS_H_
diff --git a/security/sandbox/chromium/base/win/windows_types.h b/security/sandbox/chromium/base/win/windows_types.h
new file mode 100644
index 0000000000..9be05f3c25
--- /dev/null
+++ b/security/sandbox/chromium/base/win/windows_types.h
@@ -0,0 +1,278 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains defines and typedefs that allow popular Windows types to
+// be used without the overhead of including windows.h.
+
+#ifndef BASE_WIN_WINDOWS_TYPES_H
+#define BASE_WIN_WINDOWS_TYPES_H
+
+// Needed for function prototypes.
+#if defined(__MINGW32__)
+// MinGW doesn't have this file yet, but we only need this define.
+// Bug 1552706 tracks removing this and the one below.
+#define _Releases_exclusive_lock_(lock)
+// MinGW doesn't appear to have this in specstrings.h either.
+#define _Post_equals_last_error_
+#else
+#include <concurrencysal.h>
+#endif
+#include <sal.h>
+#include <specstrings.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// typedef and define the most commonly used Windows integer types.
+
+typedef unsigned long DWORD;
+typedef long LONG;
+typedef __int64 LONGLONG;
+typedef unsigned __int64 ULONGLONG;
+
+#define VOID void
+typedef char CHAR;
+typedef short SHORT;
+typedef long LONG;
+typedef int INT;
+typedef unsigned int UINT;
+typedef unsigned int* PUINT;
+typedef void* LPVOID;
+typedef void* PVOID;
+typedef void* HANDLE;
+typedef int BOOL;
+typedef unsigned char BYTE;
+typedef BYTE BOOLEAN;
+typedef DWORD ULONG;
+typedef unsigned short WORD;
+typedef WORD UWORD;
+typedef WORD ATOM;
+
+#if defined(_WIN64)
+typedef __int64 INT_PTR, *PINT_PTR;
+typedef unsigned __int64 UINT_PTR, *PUINT_PTR;
+
+typedef __int64 LONG_PTR, *PLONG_PTR;
+typedef unsigned __int64 ULONG_PTR, *PULONG_PTR;
+#else
+typedef __w64 int INT_PTR, *PINT_PTR;
+typedef __w64 unsigned int UINT_PTR, *PUINT_PTR;
+
+typedef __w64 long LONG_PTR, *PLONG_PTR;
+typedef __w64 unsigned long ULONG_PTR, *PULONG_PTR;
+#endif
+
+typedef UINT_PTR WPARAM;
+typedef LONG_PTR LPARAM;
+typedef LONG_PTR LRESULT;
+#define LRESULT LONG_PTR
+typedef _Return_type_success_(return >= 0) long HRESULT;
+
+typedef ULONG_PTR SIZE_T, *PSIZE_T;
+typedef LONG_PTR SSIZE_T, *PSSIZE_T;
+
+typedef DWORD ACCESS_MASK;
+typedef ACCESS_MASK REGSAM;
+
+// As defined in guiddef.h.
+#ifndef _REFGUID_DEFINED
+#define _REFGUID_DEFINED
+#define REFGUID const GUID&
+#endif
+
+// Forward declare Windows compatible handles.
+
+#define CHROME_DECLARE_HANDLE(name) \
+ struct name##__; \
+ typedef struct name##__* name
+CHROME_DECLARE_HANDLE(HDESK);
+CHROME_DECLARE_HANDLE(HGLRC);
+CHROME_DECLARE_HANDLE(HICON);
+CHROME_DECLARE_HANDLE(HINSTANCE);
+CHROME_DECLARE_HANDLE(HKEY);
+CHROME_DECLARE_HANDLE(HKL);
+CHROME_DECLARE_HANDLE(HMENU);
+CHROME_DECLARE_HANDLE(HWINSTA);
+CHROME_DECLARE_HANDLE(HWND);
+#undef CHROME_DECLARE_HANDLE
+
+typedef LPVOID HINTERNET;
+typedef HINSTANCE HMODULE;
+typedef PVOID LSA_HANDLE;
+
+// Forward declare some Windows struct/typedef sets.
+
+typedef struct _OVERLAPPED OVERLAPPED;
+typedef struct tagMSG MSG, *PMSG, *NPMSG, *LPMSG;
+
+typedef struct _RTL_SRWLOCK RTL_SRWLOCK;
+typedef RTL_SRWLOCK SRWLOCK, *PSRWLOCK;
+
+typedef struct _GUID GUID;
+typedef GUID CLSID;
+
+typedef struct tagLOGFONTW LOGFONTW, *PLOGFONTW, *NPLOGFONTW, *LPLOGFONTW;
+typedef LOGFONTW LOGFONT;
+
+typedef struct _FILETIME FILETIME;
+
+typedef struct tagMENUITEMINFOW MENUITEMINFOW, MENUITEMINFO;
+
+typedef struct tagNMHDR NMHDR;
+
+typedef PVOID PSID;
+
+// Declare Chrome versions of some Windows structures. These are needed for
+// when we need a concrete type but don't want to pull in Windows.h. We can't
+// declare the Windows types so we declare our types and cast to the Windows
+// types in a few places.
+
+struct CHROME_SRWLOCK {
+ PVOID Ptr;
+};
+
+struct CHROME_CONDITION_VARIABLE {
+ PVOID Ptr;
+};
+
+// Define some commonly used Windows constants. Note that the layout of these
+// macros - including internal spacing - must be 100% consistent with windows.h.
+
+// clang-format off
+
+#ifndef INVALID_HANDLE_VALUE
+// Work around there being two slightly different definitions in the SDK.
+#define INVALID_HANDLE_VALUE ((HANDLE)(LONG_PTR)-1)
+#endif
+#define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
+#define HTNOWHERE 0
+#define MAX_PATH 260
+#define CS_GLOBALCLASS 0x4000
+
+#define ERROR_SUCCESS 0L
+#define ERROR_FILE_NOT_FOUND 2L
+#define ERROR_ACCESS_DENIED 5L
+#define ERROR_INVALID_HANDLE 6L
+#define ERROR_SHARING_VIOLATION 32L
+#define ERROR_LOCK_VIOLATION 33L
+#define REG_BINARY ( 3ul )
+
+#define STATUS_PENDING ((DWORD )0x00000103L)
+#define STILL_ACTIVE STATUS_PENDING
+#define SUCCEEDED(hr) (((HRESULT)(hr)) >= 0)
+#define FAILED(hr) (((HRESULT)(hr)) < 0)
+
+#define HKEY_CLASSES_ROOT (( HKEY ) (ULONG_PTR)((LONG)0x80000000) )
+#define HKEY_LOCAL_MACHINE (( HKEY ) (ULONG_PTR)((LONG)0x80000002) )
+#define HKEY_CURRENT_USER (( HKEY ) (ULONG_PTR)((LONG)0x80000001) )
+#define KEY_QUERY_VALUE (0x0001)
+#define KEY_SET_VALUE (0x0002)
+#define KEY_CREATE_SUB_KEY (0x0004)
+#define KEY_ENUMERATE_SUB_KEYS (0x0008)
+#define KEY_NOTIFY (0x0010)
+#define KEY_CREATE_LINK (0x0020)
+#define KEY_WOW64_32KEY (0x0200)
+#define KEY_WOW64_64KEY (0x0100)
+#define KEY_WOW64_RES (0x0300)
+
+#define READ_CONTROL (0x00020000L)
+#define SYNCHRONIZE (0x00100000L)
+
+#define STANDARD_RIGHTS_READ (READ_CONTROL)
+#define STANDARD_RIGHTS_WRITE (READ_CONTROL)
+#define STANDARD_RIGHTS_ALL (0x001F0000L)
+
+#define KEY_READ ((STANDARD_RIGHTS_READ |\
+ KEY_QUERY_VALUE |\
+ KEY_ENUMERATE_SUB_KEYS |\
+ KEY_NOTIFY) \
+ & \
+ (~SYNCHRONIZE))
+
+
+#define KEY_WRITE ((STANDARD_RIGHTS_WRITE |\
+ KEY_SET_VALUE |\
+ KEY_CREATE_SUB_KEY) \
+ & \
+ (~SYNCHRONIZE))
+
+#define KEY_ALL_ACCESS ((STANDARD_RIGHTS_ALL |\
+ KEY_QUERY_VALUE |\
+ KEY_SET_VALUE |\
+ KEY_CREATE_SUB_KEY |\
+ KEY_ENUMERATE_SUB_KEYS |\
+ KEY_NOTIFY |\
+ KEY_CREATE_LINK) \
+ & \
+ (~SYNCHRONIZE))
+
+// clang-format on
+
+// Define some macros needed when prototyping Windows functions.
+
+#define DECLSPEC_IMPORT __declspec(dllimport)
+#define WINBASEAPI DECLSPEC_IMPORT
+#define WINUSERAPI DECLSPEC_IMPORT
+#define WINAPI __stdcall
+#define CALLBACK __stdcall
+
+// Needed for optimal lock performance.
+WINBASEAPI _Releases_exclusive_lock_(*SRWLock) VOID WINAPI
+ ReleaseSRWLockExclusive(_Inout_ PSRWLOCK SRWLock);
+
+// Needed to support protobuf's GetMessage macro magic.
+WINUSERAPI BOOL WINAPI GetMessageW(_Out_ LPMSG lpMsg,
+ _In_opt_ HWND hWnd,
+ _In_ UINT wMsgFilterMin,
+ _In_ UINT wMsgFilterMax);
+
+// Needed for thread_local_storage.h
+WINBASEAPI LPVOID WINAPI TlsGetValue(_In_ DWORD dwTlsIndex);
+
+// Needed for scoped_handle.h
+WINBASEAPI _Check_return_ _Post_equals_last_error_ DWORD WINAPI
+ GetLastError(VOID);
+
+WINBASEAPI VOID WINAPI SetLastError(_In_ DWORD dwErrCode);
+
+#ifdef __cplusplus
+}
+#endif
+
+// These macros are all defined by windows.h and are also used as the names of
+// functions in the Chromium code base. Add to this list as needed whenever
+// there is a Windows macro which causes a function call to be renamed. This
+// ensures that the same renaming will happen everywhere. Includes of this file
+// can be added wherever needed to ensure this consistent renaming.
+
+#define CopyFile CopyFileW
+#define CreateDirectory CreateDirectoryW
+#define CreateEvent CreateEventW
+#define CreateFile CreateFileW
+#define CreateService CreateServiceW
+#define DeleteFile DeleteFileW
+#define DispatchMessage DispatchMessageW
+#define DrawText DrawTextW
+#define FindFirstFile FindFirstFileW
+#define FindNextFile FindNextFileW
+#define GetComputerName GetComputerNameW
+#define GetCurrentDirectory GetCurrentDirectoryW
+#define GetCurrentTime() GetTickCount()
+#define GetFileAttributes GetFileAttributesW
+#define GetMessage GetMessageW
+#define GetUserName GetUserNameW
+#define LoadIcon LoadIconW
+#define LoadImage LoadImageW
+#define PostMessage PostMessageW
+#define RemoveDirectory RemoveDirectoryW
+#define ReplaceFile ReplaceFileW
+#define ReportEvent ReportEventW
+#define SendMessage SendMessageW
+#define SendMessageCallback SendMessageCallbackW
+#define SetCurrentDirectory SetCurrentDirectoryW
+#define StartService StartServiceW
+#define UpdateResource UpdateResourceW
+
+#endif // BASE_WIN_WINDOWS_TYPES_H
diff --git a/security/sandbox/chromium/base/win/windows_version.cc b/security/sandbox/chromium/base/win/windows_version.cc
new file mode 100644
index 0000000000..ef96f8796f
--- /dev/null
+++ b/security/sandbox/chromium/base/win/windows_version.cc
@@ -0,0 +1,313 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/windows_version.h"
+
+#include <windows.h>
+
+#include <memory>
+#include <tuple>
+#include <utility>
+
+#include "base/file_version_info_win.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/no_destructor.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/registry.h"
+
+#if !defined(__clang__) && _MSC_FULL_VER < 191125507
+#error VS 2017 Update 3.2 or higher is required
+#endif
+
+#if !defined(NTDDI_WIN10_RS4)
+#error Windows 10.0.17134.0 SDK or higher required.
+#endif
+
+namespace base {
+namespace win {
+
+namespace {
+
// The values under the CurrentVersion registry hive are mirrored under
// the corresponding Wow6432 hive.
// Read by GetVersionData() below to fetch the UBR and ReleaseId values.
constexpr wchar_t kRegKeyWindowsNTCurrentVersion[] =
    L"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion";
+
+// Returns the "UBR" (Windows 10 patch number) and "ReleaseId" (Windows 10
+// release number) from the registry. "UBR" is an undocumented value and will be
+// 0 if the value was not found. "ReleaseId" will be an empty string if the
+// value is not found.
+std::pair<int, std::string> GetVersionData() {
+ DWORD ubr = 0;
+ std::wstring release_id;
+ RegKey key;
+
+ if (key.Open(HKEY_LOCAL_MACHINE, kRegKeyWindowsNTCurrentVersion,
+ KEY_QUERY_VALUE) == ERROR_SUCCESS) {
+ key.ReadValueDW(L"UBR", &ubr);
+ key.ReadValue(L"ReleaseId", &release_id);
+ }
+
+ return std::make_pair(static_cast<int>(ubr), WideToUTF8(release_id));
+}
+
+const _SYSTEM_INFO& GetSystemInfoStorage() {
+ static const NoDestructor<_SYSTEM_INFO> system_info([] {
+ _SYSTEM_INFO info = {};
+ ::GetNativeSystemInfo(&info);
+ return info;
+ }());
+ return *system_info;
+}
+
+} // namespace
+
+// static
+OSInfo** OSInfo::GetInstanceStorage() {
+ // Note: we don't use the Singleton class because it depends on AtExitManager,
+ // and it's convenient for other modules to use this class without it.
+ static OSInfo* info = []() {
+ _OSVERSIONINFOEXW version_info = {sizeof(version_info)};
+ ::GetVersionEx(reinterpret_cast<_OSVERSIONINFOW*>(&version_info));
+
+ DWORD os_type = 0;
+ ::GetProductInfo(version_info.dwMajorVersion, version_info.dwMinorVersion,
+ 0, 0, &os_type);
+
+ return new OSInfo(version_info, GetSystemInfoStorage(), os_type);
+ }();
+
+ return &info;
+}
+
+// static
+OSInfo* OSInfo::GetInstance() {
+ return *GetInstanceStorage();
+}
+
+// static
+OSInfo::WindowsArchitecture OSInfo::GetArchitecture() {
+ switch (GetSystemInfoStorage().wProcessorArchitecture) {
+ case PROCESSOR_ARCHITECTURE_INTEL:
+ return X86_ARCHITECTURE;
+ case PROCESSOR_ARCHITECTURE_AMD64:
+ return X64_ARCHITECTURE;
+ case PROCESSOR_ARCHITECTURE_IA64:
+ return IA64_ARCHITECTURE;
+ case PROCESSOR_ARCHITECTURE_ARM64:
+ return ARM64_ARCHITECTURE;
+ default:
+ return OTHER_ARCHITECTURE;
+ }
+}
+
// Builds the singleton's state from a GetVersionEx() result, the native
// system info, and the GetProductInfo() product type. |os_type| is only
// consulted for Vista-and-later versions (major 6 or 10).
OSInfo::OSInfo(const _OSVERSIONINFOEXW& version_info,
               const _SYSTEM_INFO& system_info,
               int os_type)
    : version_(Version::PRE_XP),
      wow64_status_(GetWOW64StatusForProcess(GetCurrentProcess())) {
  version_number_.major = version_info.dwMajorVersion;
  version_number_.minor = version_info.dwMinorVersion;
  version_number_.build = version_info.dwBuildNumber;
  // Patch (UBR) and release id come from the registry, not from
  // GetVersionEx().
  std::tie(version_number_.patch, release_id_) = GetVersionData();
  version_ = MajorMinorBuildToVersion(
      version_number_.major, version_number_.minor, version_number_.build);
  service_pack_.major = version_info.wServicePackMajor;
  service_pack_.minor = version_info.wServicePackMinor;
  service_pack_str_ = WideToUTF8(version_info.szCSDVersion);

  processors_ = system_info.dwNumberOfProcessors;
  allocation_granularity_ = system_info.dwAllocationGranularity;

  if (version_info.dwMajorVersion == 6 || version_info.dwMajorVersion == 10) {
    // Only present on Vista+.
    switch (os_type) {
      // Server SKUs.
      case PRODUCT_CLUSTER_SERVER:
      case PRODUCT_DATACENTER_SERVER:
      case PRODUCT_DATACENTER_SERVER_CORE:
      case PRODUCT_ENTERPRISE_SERVER:
      case PRODUCT_ENTERPRISE_SERVER_CORE:
      case PRODUCT_ENTERPRISE_SERVER_IA64:
      case PRODUCT_SMALLBUSINESS_SERVER:
      case PRODUCT_SMALLBUSINESS_SERVER_PREMIUM:
      case PRODUCT_STANDARD_SERVER:
      case PRODUCT_STANDARD_SERVER_CORE:
      case PRODUCT_WEB_SERVER:
        version_type_ = SUITE_SERVER;
        break;
      // Professional-class desktop SKUs.
      case PRODUCT_PROFESSIONAL:
      case PRODUCT_ULTIMATE:
        version_type_ = SUITE_PROFESSIONAL;
        break;
      // Enterprise/business SKUs (including N and evaluation variants).
      case PRODUCT_ENTERPRISE:
      case PRODUCT_ENTERPRISE_E:
      case PRODUCT_ENTERPRISE_EVALUATION:
      case PRODUCT_ENTERPRISE_N:
      case PRODUCT_ENTERPRISE_N_EVALUATION:
      case PRODUCT_ENTERPRISE_S:
      case PRODUCT_ENTERPRISE_S_EVALUATION:
      case PRODUCT_ENTERPRISE_S_N:
      case PRODUCT_ENTERPRISE_S_N_EVALUATION:
      case PRODUCT_BUSINESS:
      case PRODUCT_BUSINESS_N:
        version_type_ = SUITE_ENTERPRISE;
        break;
      case PRODUCT_EDUCATION:
      case PRODUCT_EDUCATION_N:
        version_type_ = SUITE_EDUCATION;
        break;
      // Home-class SKUs; also the fallback for any unrecognized product type.
      case PRODUCT_HOME_BASIC:
      case PRODUCT_HOME_PREMIUM:
      case PRODUCT_STARTER:
      default:
        version_type_ = SUITE_HOME;
        break;
    }
  } else if (version_info.dwMajorVersion == 5 &&
             version_info.dwMinorVersion == 2) {
    // 5.2: XP Pro x64 reports as a workstation on AMD64; Home Server sets
    // VER_SUITE_WH_SERVER; everything else is a Server 2003 variant.
    if (version_info.wProductType == VER_NT_WORKSTATION &&
        system_info.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
      version_type_ = SUITE_PROFESSIONAL;
    } else if (version_info.wSuiteMask & VER_SUITE_WH_SERVER) {
      version_type_ = SUITE_HOME;
    } else {
      version_type_ = SUITE_SERVER;
    }
  } else if (version_info.dwMajorVersion == 5 &&
             version_info.dwMinorVersion == 1) {
    // 5.1: Windows XP. VER_SUITE_PERSONAL distinguishes Home from Pro.
    if (version_info.wSuiteMask & VER_SUITE_PERSONAL)
      version_type_ = SUITE_HOME;
    else
      version_type_ = SUITE_PROFESSIONAL;
  } else {
    // Windows is pre XP so we don't care but pick a safe default.
    version_type_ = SUITE_HOME;
  }
}
+
// Nothing to release explicitly; members (strings, scalars) clean up
// themselves.
OSInfo::~OSInfo() = default;
+
+Version OSInfo::Kernel32Version() const {
+ static const Version kernel32_version =
+ MajorMinorBuildToVersion(Kernel32BaseVersion().components()[0],
+ Kernel32BaseVersion().components()[1],
+ Kernel32BaseVersion().components()[2]);
+ return kernel32_version;
+}
+
+Version OSInfo::UcrtVersion() const {
+ auto ucrt_version_info = FileVersionInfoWin::CreateFileVersionInfoWin(
+ FilePath(FILE_PATH_LITERAL("ucrtbase.dll")));
+ if (ucrt_version_info) {
+ auto ucrt_components = ucrt_version_info->GetFileVersion().components();
+ if (ucrt_components.size() == 4) {
+ return MajorMinorBuildToVersion(ucrt_components[0], ucrt_components[1],
+ ucrt_components[2]);
+ }
+ }
+ return Version();
+}
+
+// Retrieve a version from kernel32. This is useful because when running in
+// compatibility mode for a down-level version of the OS, the file version of
+// kernel32 will still be the "real" version.
+base::Version OSInfo::Kernel32BaseVersion() const {
+ static const NoDestructor<base::Version> version([] {
+ std::unique_ptr<FileVersionInfoWin> file_version_info =
+ FileVersionInfoWin::CreateFileVersionInfoWin(
+ FilePath(FILE_PATH_LITERAL("kernel32.dll")));
+ if (!file_version_info) {
+ // crbug.com/912061: on some systems it seems kernel32.dll might be
+ // corrupted or not in a state to get version info. In this case try
+ // kernelbase.dll as a fallback.
+ file_version_info = FileVersionInfoWin::CreateFileVersionInfoWin(
+ FilePath(FILE_PATH_LITERAL("kernelbase.dll")));
+ }
+ CHECK(file_version_info);
+ return file_version_info->GetFileVersion();
+ }());
+ return *version;
+}
+
+std::string OSInfo::processor_model_name() {
+ if (processor_model_name_.empty()) {
+ const wchar_t kProcessorNameString[] =
+ L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0";
+ RegKey key(HKEY_LOCAL_MACHINE, kProcessorNameString, KEY_READ);
+ std::wstring value;
+ key.ReadValue(L"ProcessorNameString", &value);
+ processor_model_name_ = WideToUTF8(value);
+ }
+ return processor_model_name_;
+}
+
+// static
+OSInfo::WOW64Status OSInfo::GetWOW64StatusForProcess(HANDLE process_handle) {
+ BOOL is_wow64 = FALSE;
+ if (!::IsWow64Process(process_handle, &is_wow64))
+ return WOW64_UNKNOWN;
+ return is_wow64 ? WOW64_ENABLED : WOW64_DISABLED;
+}
+
+// With the exception of Server 2003, server variants are treated the same as
+// the corresponding workstation release.
+// static
+Version OSInfo::MajorMinorBuildToVersion(int major, int minor, int build) {
+ if (major == 10) {
+ if (build >= 19041)
+ return Version::WIN10_20H1;
+ if (build >= 18362)
+ return Version::WIN10_19H1;
+ if (build >= 17763)
+ return Version::WIN10_RS5;
+ if (build >= 17134)
+ return Version::WIN10_RS4;
+ if (build >= 16299)
+ return Version::WIN10_RS3;
+ if (build >= 15063)
+ return Version::WIN10_RS2;
+ if (build >= 14393)
+ return Version::WIN10_RS1;
+ if (build >= 10586)
+ return Version::WIN10_TH2;
+ return Version::WIN10;
+ }
+
+ if (major > 6) {
+ // Hitting this likely means that it's time for a >10 block above.
+ NOTREACHED() << major << "." << minor << "." << build;
+ return Version::WIN_LAST;
+ }
+
+ if (major == 6) {
+ switch (minor) {
+ case 0:
+ return Version::VISTA;
+ case 1:
+ return Version::WIN7;
+ case 2:
+ return Version::WIN8;
+ default:
+ DCHECK_EQ(minor, 3);
+ return Version::WIN8_1;
+ }
+ }
+
+ if (major == 5 && minor != 0) {
+ // Treat XP Pro x64, Home Server, and Server 2003 R2 as Server 2003.
+ return minor == 1 ? Version::XP : Version::SERVER_2003;
+ }
+
+ // Win 2000 or older.
+ return Version::PRE_XP;
+}
+
+Version GetVersion() {
+ return OSInfo::GetInstance()->version();
+}
+
+} // namespace win
+} // namespace base
diff --git a/security/sandbox/chromium/base/win/windows_version.h b/security/sandbox/chromium/base/win/windows_version.h
new file mode 100644
index 0000000000..08d6e274ef
--- /dev/null
+++ b/security/sandbox/chromium/base/win/windows_version.h
@@ -0,0 +1,187 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_WINDOWS_VERSION_H_
+#define BASE_WIN_WINDOWS_VERSION_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/version.h"
+
+using HANDLE = void*;
+struct _OSVERSIONINFOEXW;
+struct _SYSTEM_INFO;
+
+namespace base {
+namespace test {
+class ScopedOSInfoOverride;
+} // namespace test
+} // namespace base
+
+namespace base {
+namespace win {
+
// The running version of Windows. This is declared outside OSInfo for
// syntactic sugar reasons; see the declaration of GetVersion() below.
// NOTE: Keep these in order so callers can do things like
// "if (base::win::GetVersion() >= base::win::Version::VISTA) ...".
//
// This enum is used in metrics histograms, so they shouldn't be reordered or
// removed. New values can be added before Version::WIN_LAST.
enum class Version {
  PRE_XP = 0,  // Not supported.
  XP = 1,
  SERVER_2003 = 2,  // Also includes XP Pro x64 and Server 2003 R2.
  VISTA = 3,        // Also includes Windows Server 2008.
  WIN7 = 4,         // Also includes Windows Server 2008 R2.
  WIN8 = 5,         // Also includes Windows Server 2012.
  WIN8_1 = 6,       // Also includes Windows Server 2012 R2.
  WIN10 = 7,        // Threshold 1: Version 1507, Build 10240.
  WIN10_TH2 = 8,    // Threshold 2: Version 1511, Build 10586.
  WIN10_RS1 = 9,    // Redstone 1: Version 1607, Build 14393.
  WIN10_RS2 = 10,   // Redstone 2: Version 1703, Build 15063.
  WIN10_RS3 = 11,   // Redstone 3: Version 1709, Build 16299.
  WIN10_RS4 = 12,   // Redstone 4: Version 1803, Build 17134.
  WIN10_RS5 = 13,   // Redstone 5: Version 1809, Build 17763.
  WIN10_19H1 = 14,  // 19H1: Version 1903, Build 18362.
  WIN10_20H1 = 15,  // 20H1: Version 2004, Build 19041.
  // On edit, update tools\metrics\histograms\enums.xml "WindowsVersion" and
  // "GpuBlacklistFeatureTestResultsWindows2".
  WIN_LAST,  // Indicates error condition.
};
+
// A rough bucketing of the available types of versions of Windows. This is used
// to distinguish enterprise enabled versions from home versions and potentially
// server versions. Keep these values in the same order, since they are used as
// is for metrics histogram ids.
// NOTE(review): SUITE_LAST presumably serves as the histogram boundary value;
// keep it last when adding entries — confirm against the histogram definitions.
enum VersionType {
  SUITE_HOME = 0,
  SUITE_PROFESSIONAL,
  SUITE_SERVER,
  SUITE_ENTERPRISE,
  SUITE_EDUCATION,
  SUITE_LAST,
};
+
// A singleton that can be used to query various pieces of information about the
// OS and process state. Note that this doesn't use the base Singleton class, so
// it can be used without an AtExitManager.
class BASE_EXPORT OSInfo {
 public:
  // Numeric OS version: major.minor.build, plus the Windows 10 "UBR" patch
  // number read from the registry (0 when absent).
  struct VersionNumber {
    int major;
    int minor;
    int build;
    int patch;
  };

  // Service pack level, e.g. {3, 0} for "Service Pack 3".
  struct ServicePack {
    int major;
    int minor;
  };

  // The processor architecture this copy of Windows natively uses. For
  // example, given an x64-capable processor, we have three possibilities:
  // 32-bit Chrome running on 32-bit Windows: X86_ARCHITECTURE
  // 32-bit Chrome running on 64-bit Windows via WOW64: X64_ARCHITECTURE
  // 64-bit Chrome running on 64-bit Windows: X64_ARCHITECTURE
  enum WindowsArchitecture {
    X86_ARCHITECTURE,
    X64_ARCHITECTURE,
    IA64_ARCHITECTURE,
    ARM64_ARCHITECTURE,
    OTHER_ARCHITECTURE,
  };

  // Whether a process is running under WOW64 (the wrapper that allows 32-bit
  // processes to run on 64-bit versions of Windows). This will return
  // WOW64_DISABLED for both "32-bit Chrome on 32-bit Windows" and "64-bit
  // Chrome on 64-bit Windows". WOW64_UNKNOWN means "an error occurred", e.g.
  // the process does not have sufficient access rights to determine this.
  enum WOW64Status {
    WOW64_DISABLED,
    WOW64_ENABLED,
    WOW64_UNKNOWN,
  };

  // Returns the lazily-created, intentionally-leaked process-wide instance.
  static OSInfo* GetInstance();

  // Separate from the rest of OSInfo so it can be used during early process
  // initialization.
  static WindowsArchitecture GetArchitecture();

  // Like wow64_status(), but for the supplied handle instead of the current
  // process. This doesn't touch member state, so you can bypass the singleton.
  static WOW64Status GetWOW64StatusForProcess(HANDLE process_handle);

  // Version as derived from GetVersionEx(). Under an app-compat shim this can
  // report a down-level OS; Kernel32Version() below reads the file version of
  // kernel32.dll instead, which is not affected by compatibility mode.
  const Version& version() const { return version_; }
  Version Kernel32Version() const;
  Version UcrtVersion() const;
  base::Version Kernel32BaseVersion() const;
  // Full major.minor.build.patch numbers and the SKU bucket of this OS.
  // NOTE(review): several accessors here return const references to scalar
  // members; plain value returns would be equivalent for callers — kept as-is
  // for API stability.
  const VersionNumber& version_number() const { return version_number_; }
  const VersionType& version_type() const { return version_type_; }
  const ServicePack& service_pack() const { return service_pack_; }
  const std::string& service_pack_str() const { return service_pack_str_; }
  const int& processors() const { return processors_; }
  const size_t& allocation_granularity() const {
    return allocation_granularity_;
  }
  const WOW64Status& wow64_status() const { return wow64_status_; }
  // Reads the CPU model name from the registry on first use, then caches it.
  std::string processor_model_name();
  const std::string& release_id() const { return release_id_; }

 private:
  friend class base::test::ScopedOSInfoOverride;
  FRIEND_TEST_ALL_PREFIXES(OSInfo, MajorMinorBuildToVersion);
  static OSInfo** GetInstanceStorage();

  OSInfo(const _OSVERSIONINFOEXW& version_info,
         const _SYSTEM_INFO& system_info,
         int os_type);
  ~OSInfo();

  // Returns a Version value for a given OS version tuple.
  static Version MajorMinorBuildToVersion(int major, int minor, int build);

  Version version_;
  VersionNumber version_number_;
  VersionType version_type_;
  ServicePack service_pack_;

  // Represents the version of the OS associated to a release of
  // Windows 10. Each version may have different releases (such as patch
  // updates). This is the identifier of the release.
  // Example:
  //   Windows 10 Version 1809 (OS build 17763) has multiple releases
  //   (i.e. build 17763.1, build 17763.195, build 17763.379, ...).
  // See https://docs.microsoft.com/en-us/windows/windows-10/release-information
  // for more information.
  std::string release_id_;

  // A string, such as "Service Pack 3", that indicates the latest Service Pack
  // installed on the system. If no Service Pack has been installed, the string
  // is empty.
  std::string service_pack_str_;
  // Number of logical processors and VM allocation granularity, both captured
  // from GetNativeSystemInfo().
  int processors_;
  size_t allocation_granularity_;
  WOW64Status wow64_status_;
  // Lazily-populated cache for processor_model_name().
  std::string processor_model_name_;

  DISALLOW_COPY_AND_ASSIGN(OSInfo);
};
+
+// Because this is by far the most commonly-requested value from the above
+// singleton, we add a global-scope accessor here as syntactic sugar.
+BASE_EXPORT Version GetVersion();
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_WINDOWS_VERSION_H_