author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 09:22:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 09:22:09 +0000
commit    43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree      620249daf56c0258faa40cbdcf9cfba06de2a846 /security/sandbox/chromium/base/memory
parent    Initial commit. (diff)
Adding upstream version 110.0.1. (tag: upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'security/sandbox/chromium/base/memory')
-rw-r--r-- security/sandbox/chromium/base/memory/aligned_memory.h                     |  60
-rw-r--r-- security/sandbox/chromium/base/memory/free_deleter.h                       |  25
-rw-r--r-- security/sandbox/chromium/base/memory/platform_shared_memory_region.cc     |  62
-rw-r--r-- security/sandbox/chromium/base/memory/platform_shared_memory_region.h      | 301
-rw-r--r-- security/sandbox/chromium/base/memory/platform_shared_memory_region_win.cc | 343
-rw-r--r-- security/sandbox/chromium/base/memory/ptr_util.h                           |  23
-rw-r--r-- security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h |  52
-rw-r--r-- security/sandbox/chromium/base/memory/ref_counted.cc                       | 105
-rw-r--r-- security/sandbox/chromium/base/memory/ref_counted.h                        | 463
-rw-r--r-- security/sandbox/chromium/base/memory/scoped_refptr.h                      | 375
-rw-r--r-- security/sandbox/chromium/base/memory/shared_memory_mapping.cc             | 115
-rw-r--r-- security/sandbox/chromium/base/memory/shared_memory_mapping.h              | 252
-rw-r--r-- security/sandbox/chromium/base/memory/singleton.h                          | 279
-rw-r--r-- security/sandbox/chromium/base/memory/unsafe_shared_memory_region.cc       |  80
-rw-r--r-- security/sandbox/chromium/base/memory/unsafe_shared_memory_region.h        | 127
-rw-r--r-- security/sandbox/chromium/base/memory/weak_ptr.h                           | 395
16 files changed, 3057 insertions, 0 deletions
diff --git a/security/sandbox/chromium/base/memory/aligned_memory.h b/security/sandbox/chromium/base/memory/aligned_memory.h
new file mode 100644
index 0000000000..a242b730be
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/aligned_memory.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
+#define BASE_MEMORY_ALIGNED_MEMORY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <type_traits>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+#include <malloc.h>
+#else
+#include <stdlib.h>
+#endif
+
+// A runtime sized aligned allocation can be created:
+//
+// float* my_array = static_cast<float*>(AlignedAlloc(size, alignment));
+//
+// // ... later, to release the memory:
+// AlignedFree(my_array);
+//
+// Or using unique_ptr:
+//
+// std::unique_ptr<float, AlignedFreeDeleter> my_array(
+// static_cast<float*>(AlignedAlloc(size, alignment)));
+
+namespace base {
+
+// This can be replaced with std::aligned_alloc when we have C++17.
+// Caveat: std::aligned_alloc requires the size parameter be an integral
+// multiple of alignment.
+BASE_EXPORT void* AlignedAlloc(size_t size, size_t alignment);
+
+inline void AlignedFree(void* ptr) {
+#if defined(COMPILER_MSVC)
+ _aligned_free(ptr);
+#else
+ free(ptr);
+#endif
+}
+
+// Deleter for use with unique_ptr. E.g., use as
+// std::unique_ptr<Foo, base::AlignedFreeDeleter> foo;
+struct AlignedFreeDeleter {
+ inline void operator()(void* ptr) const {
+ AlignedFree(ptr);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_ALIGNED_MEMORY_H_
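
For illustration (not part of the commit), a minimal usage sketch of the header above; the function name and sizes are hypothetical:

    // Sketch only: allocate 128 floats on a 32-byte boundary and release them
    // through base::AlignedFreeDeleter, never plain free()/delete[].
    #include <memory>

    #include "base/memory/aligned_memory.h"

    void UseAlignedBuffer() {
      std::unique_ptr<float, base::AlignedFreeDeleter> data(
          static_cast<float*>(base::AlignedAlloc(128 * sizeof(float), 32)));
      data.get()[0] = 1.0f;
    }  // AlignedFree() runs here via the deleter.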
diff --git a/security/sandbox/chromium/base/memory/free_deleter.h b/security/sandbox/chromium/base/memory/free_deleter.h
new file mode 100644
index 0000000000..5604118865
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/free_deleter.h
@@ -0,0 +1,25 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_FREE_DELETER_H_
+#define BASE_MEMORY_FREE_DELETER_H_
+
+#include <stdlib.h>
+
+namespace base {
+
+// Function object which invokes 'free' on its parameter, which must be
+// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
+//
+// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
+// static_cast<int*>(malloc(sizeof(int))));
+struct FreeDeleter {
+ inline void operator()(void* ptr) const {
+ free(ptr);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_FREE_DELETER_H_
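
A similarly minimal sketch for FreeDeleter (illustrative, not part of the commit; the function name is made up):

    // Sketch only: a malloc'd buffer must go back through free(), so the
    // unique_ptr is given base::FreeDeleter instead of the default deleter.
    #include <cstdlib>
    #include <memory>

    #include "base/memory/free_deleter.h"

    void UseMallocBuffer() {
      std::unique_ptr<char, base::FreeDeleter> buf(
          static_cast<char*>(std::malloc(64)));
      if (buf)
        buf.get()[0] = '\0';
    }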
diff --git a/security/sandbox/chromium/base/memory/platform_shared_memory_region.cc b/security/sandbox/chromium/base/memory/platform_shared_memory_region.cc
new file mode 100644
index 0000000000..45647925b3
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/platform_shared_memory_region.cc
@@ -0,0 +1,62 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include "base/memory/shared_memory_mapping.h"
+#include "base/numerics/checked_math.h"
+
+namespace base {
+namespace subtle {
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateWritable(
+ size_t size) {
+ return Create(Mode::kWritable, size);
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateUnsafe(
+ size_t size) {
+ return Create(Mode::kUnsafe, size);
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion() = default;
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+ PlatformSharedMemoryRegion&& other) = default;
+PlatformSharedMemoryRegion& PlatformSharedMemoryRegion::operator=(
+ PlatformSharedMemoryRegion&& other) = default;
+PlatformSharedMemoryRegion::~PlatformSharedMemoryRegion() = default;
+
+PlatformSharedMemoryRegion::ScopedPlatformHandle
+PlatformSharedMemoryRegion::PassPlatformHandle() {
+ return std::move(handle_);
+}
+
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+ size_t size,
+ void** memory,
+ size_t* mapped_size) const {
+ if (!IsValid())
+ return false;
+
+ if (size == 0)
+ return false;
+
+ size_t end_byte;
+ if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+ return false;
+ }
+
+ bool success = MapAtInternal(offset, size, memory, mapped_size);
+ if (success) {
+ DCHECK_EQ(
+ 0U, reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+ }
+
+ return success;
+}
+
+} // namespace subtle
+} // namespace base
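
The interesting part of MapAt() is the overflow-safe bounds check: CheckAdd() rejects |offset| + |size| sums that wrap before they are compared against |size_|. A standalone sketch of the same check (illustrative only; it substitutes the GCC/Clang __builtin_add_overflow intrinsic for base::CheckAdd):

    // Sketch of MapAt()'s range check without base/numerics; assumes a
    // GCC/Clang toolchain for __builtin_add_overflow.
    #include <cstddef>
    #include <cstdint>

    bool RangeFitsInRegion(int64_t offset, size_t size, size_t region_size) {
      if (offset < 0 || size == 0)
        return false;
      uint64_t end_byte = 0;
      // __builtin_add_overflow returns true if the addition wrapped around.
      if (__builtin_add_overflow(static_cast<uint64_t>(offset),
                                 static_cast<uint64_t>(size), &end_byte)) {
        return false;
      }
      return end_byte <= region_size;
    }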
diff --git a/security/sandbox/chromium/base/memory/platform_shared_memory_region.h b/security/sandbox/chromium/base/memory/platform_shared_memory_region.h
new file mode 100644
index 0000000000..220cbdd65e
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/platform_shared_memory_region.h
@@ -0,0 +1,301 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
+
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include "base/mac/scoped_mach_port.h"
+#elif defined(OS_FUCHSIA)
+#include <lib/zx/vmo.h>
+#elif defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX)
+#include <sys/types.h>
+#include "base/file_descriptor_posix.h"
+#include "base/files/scoped_file.h"
+#endif
+
+#if defined(OS_LINUX)
+namespace content {
+class SandboxIPCHandler;
+}
+#endif
+
+namespace base {
+namespace subtle {
+
+#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
+ !defined(OS_ANDROID)
+// Helper structs for keeping two descriptors on POSIX. They are needed to
+// support ConvertToReadOnly().
+struct BASE_EXPORT FDPair {
+ // The main shared memory descriptor that is used for mapping. May be either
+ // writable or read-only, depending on region's mode.
+ int fd;
+ // The read-only descriptor, valid only in kWritable mode. Replaces |fd| when
+ // a region is converted to read-only.
+ int readonly_fd;
+};
+
+struct BASE_EXPORT ScopedFDPair {
+ ScopedFDPair();
+ ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd);
+ ScopedFDPair(ScopedFDPair&&);
+ ScopedFDPair& operator=(ScopedFDPair&&);
+ ~ScopedFDPair();
+
+ FDPair get() const;
+
+ ScopedFD fd;
+ ScopedFD readonly_fd;
+};
+#endif
+
+// Implementation class for shared memory regions.
+//
+// This class does the following:
+//
+// - Wraps and owns a shared memory region platform handle.
+// - Provides a way to allocate a new region of platform shared memory of given
+// size.
+// - Provides a way to create mapping of the region in the current process'
+// address space, under special access-control constraints (see Mode).
+// - Provides methods to help transferring the handle across process boundaries.
+// - Holds a 128-bit unique identifier used to uniquely identify the same
+// kernel region resource across processes (used for memory tracking).
+// - Has a method to retrieve the region's size in bytes.
+//
+// IMPORTANT NOTE: Users should never use this directly, but
+// ReadOnlySharedMemoryRegion, WritableSharedMemoryRegion or
+// UnsafeSharedMemoryRegion since this is an implementation class.
+class BASE_EXPORT PlatformSharedMemoryRegion {
+ public:
+ // Permission mode of the platform handle. Each mode corresponds to one of the
+ // typed shared memory classes:
+ //
+ // * ReadOnlySharedMemoryRegion: A region that can only create read-only
+ // mappings.
+ //
+ // * WritableSharedMemoryRegion: A region that can only create writable
+ // mappings. The region can be demoted to ReadOnlySharedMemoryRegion without
+ // the possibility of promoting back to writable.
+ //
+ // * UnsafeSharedMemoryRegion: A region that can only create writable
+ // mappings. The region cannot be demoted to ReadOnlySharedMemoryRegion.
+ enum class Mode {
+ kReadOnly, // ReadOnlySharedMemoryRegion
+ kWritable, // WritableSharedMemoryRegion
+ kUnsafe, // UnsafeSharedMemoryRegion
+ kMaxValue = kUnsafe
+ };
+
+ // Errors that can occur during Shared Memory construction.
+ // These match tools/metrics/histograms/enums.xml.
+ // This enum is append-only.
+ enum class CreateError {
+ SUCCESS = 0,
+ SIZE_ZERO = 1,
+ SIZE_TOO_LARGE = 2,
+ INITIALIZE_ACL_FAILURE = 3,
+ INITIALIZE_SECURITY_DESC_FAILURE = 4,
+ SET_SECURITY_DESC_FAILURE = 5,
+ CREATE_FILE_MAPPING_FAILURE = 6,
+ REDUCE_PERMISSIONS_FAILURE = 7,
+ ALREADY_EXISTS = 8,
+ ALLOCATE_FILE_REGION_FAILURE = 9,
+ FSTAT_FAILURE = 10,
+ INODES_MISMATCH = 11,
+ GET_SHMEM_TEMP_DIR_FAILURE = 12,
+ kMaxValue = GET_SHMEM_TEMP_DIR_FAILURE
+ };
+
+#if defined(OS_LINUX)
+ // Structure to limit access to executable region creation.
+ struct ExecutableRegion {
+ private:
+    // Creates a new shared memory region in the unsafe mode (writable and not
+    // convertible to read-only), and in addition marked executable. A ScopedFD
+    // to this region is returned. Any mapping will have to be done manually,
+    // including setting executable permissions if necessary.
+ //
+ // This is only used to support sandbox_ipc_linux.cc, and should not be used
+ // anywhere else in chrome. This is restricted via AllowCreateExecutable.
+ // TODO(crbug.com/982879): remove this when NaCl is unshipped.
+ //
+ // Returns an invalid ScopedFD if the call fails.
+ static ScopedFD CreateFD(size_t size);
+
+ friend class content::SandboxIPCHandler;
+ };
+#endif
+
+// Platform-specific shared memory type used by this class.
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ using PlatformHandle = mach_port_t;
+ using ScopedPlatformHandle = mac::ScopedMachSendRight;
+#elif defined(OS_FUCHSIA)
+ using PlatformHandle = zx::unowned_vmo;
+ using ScopedPlatformHandle = zx::vmo;
+#elif defined(OS_WIN)
+ using PlatformHandle = HANDLE;
+ using ScopedPlatformHandle = win::ScopedHandle;
+#elif defined(OS_ANDROID)
+ using PlatformHandle = int;
+ using ScopedPlatformHandle = ScopedFD;
+#else
+ using PlatformHandle = FDPair;
+ using ScopedPlatformHandle = ScopedFDPair;
+#endif
+
+ // The minimum alignment in bytes that any mapped address produced by Map()
+ // and MapAt() is guaranteed to have.
+ enum { kMapMinimumAlignment = 32 };
+
+ // Creates a new PlatformSharedMemoryRegion with corresponding mode and size.
+ // Creating in kReadOnly mode isn't supported because then there will be no
+ // way to modify memory content.
+ static PlatformSharedMemoryRegion CreateWritable(size_t size);
+ static PlatformSharedMemoryRegion CreateUnsafe(size_t size);
+
+ // Returns a new PlatformSharedMemoryRegion that takes ownership of the
+ // |handle|. All parameters must be taken from another valid
+ // PlatformSharedMemoryRegion instance, e.g. |size| must be equal to the
+ // actual region size as allocated by the kernel.
+ // Closes the |handle| and returns an invalid instance if passed parameters
+ // are invalid.
+ static PlatformSharedMemoryRegion Take(ScopedPlatformHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid);
+#if defined(OS_POSIX) && !defined(OS_ANDROID) && \
+ !(defined(OS_MACOSX) && !defined(OS_IOS))
+  // Specialized version of Take() for POSIX that takes only one file
+  // descriptor instead of a pair. Cannot be used with kWritable |mode|.
+ static PlatformSharedMemoryRegion Take(ScopedFD handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid);
+#endif
+
+ // Default constructor initializes an invalid instance, i.e. an instance that
+ // doesn't wrap any valid platform handle.
+ PlatformSharedMemoryRegion();
+
+ // Move operations are allowed.
+ PlatformSharedMemoryRegion(PlatformSharedMemoryRegion&&);
+ PlatformSharedMemoryRegion& operator=(PlatformSharedMemoryRegion&&);
+
+ // Destructor closes the platform handle. Does nothing if the handle is
+ // invalid.
+ ~PlatformSharedMemoryRegion();
+
+ // Passes ownership of the platform handle to the caller. The current instance
+ // becomes invalid. It's the responsibility of the caller to close the
+ // handle. If the current instance is invalid, ScopedPlatformHandle will also
+ // be invalid.
+ ScopedPlatformHandle PassPlatformHandle() WARN_UNUSED_RESULT;
+
+ // Returns the platform handle. The current instance keeps ownership of this
+ // handle.
+ PlatformHandle GetPlatformHandle() const;
+
+ // Whether the platform handle is valid.
+ bool IsValid() const;
+
+ // Duplicates the platform handle and creates a new PlatformSharedMemoryRegion
+ // with the same |mode_|, |size_| and |guid_| that owns this handle. Returns
+ // invalid region on failure, the current instance remains valid.
+  // Can be called only in kReadOnly and kUnsafe modes; CHECK-fails if it is
+  // called in kWritable mode.
+ PlatformSharedMemoryRegion Duplicate() const;
+
+ // Converts the region to read-only. Returns whether the operation succeeded.
+ // Makes the current instance invalid on failure. Can be called only in
+ // kWritable mode, all other modes will CHECK-fail. The object will have
+ // kReadOnly mode after this call on success.
+ bool ConvertToReadOnly();
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // Same as above, but |mapped_addr| is used as a hint to avoid additional
+ // mapping of the memory object.
+ // |mapped_addr| must be mapped location of |memory_object_|. If the location
+ // is unknown, |mapped_addr| should be |nullptr|.
+ bool ConvertToReadOnly(void* mapped_addr);
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
+ // Converts the region to unsafe. Returns whether the operation succeeded.
+ // Makes the current instance invalid on failure. Can be called only in
+ // kWritable mode, all other modes will CHECK-fail. The object will have
+ // kUnsafe mode after this call on success.
+ bool ConvertToUnsafe();
+
+ // Maps |size| bytes of the shared memory region starting with the given
+ // |offset| into the caller's address space. |offset| must be aligned to value
+ // of |SysInfo::VMAllocationGranularity()|. Fails if requested bytes are out
+ // of the region limits.
+ // Returns true and sets |memory| and |mapped_size| on success, returns false
+ // and leaves output parameters in unspecified state otherwise. The mapped
+ // address is guaranteed to have an alignment of at least
+ // |kMapMinimumAlignment|.
+ bool MapAt(off_t offset,
+ size_t size,
+ void** memory,
+ size_t* mapped_size) const;
+
+ const UnguessableToken& GetGUID() const { return guid_; }
+
+ size_t GetSize() const { return size_; }
+
+ Mode GetMode() const { return mode_; }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
+ CreateReadOnlyRegionDeathTest);
+ FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
+ CheckPlatformHandlePermissionsCorrespondToMode);
+ static PlatformSharedMemoryRegion Create(Mode mode,
+ size_t size
+#if defined(OS_LINUX)
+ ,
+ bool executable = false
+#endif
+ );
+
+ static bool CheckPlatformHandlePermissionsCorrespondToMode(
+ PlatformHandle handle,
+ Mode mode,
+ size_t size);
+
+ PlatformSharedMemoryRegion(ScopedPlatformHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid);
+
+ bool MapAtInternal(off_t offset,
+ size_t size,
+ void** memory,
+ size_t* mapped_size) const;
+
+ ScopedPlatformHandle handle_;
+ Mode mode_ = Mode::kReadOnly;
+ size_t size_ = 0;
+ UnguessableToken guid_;
+
+ DISALLOW_COPY_AND_ASSIGN(PlatformSharedMemoryRegion);
+};
+
+} // namespace subtle
+} // namespace base
+
+#endif // BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
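
Putting the pieces of the interface together, a hedged lifecycle sketch (illustrative only; real callers should go through the typed wrapper classes, per the IMPORTANT NOTE above, and the function name and sizes are made up):

    // Sketch: create a writable region, map and fill it, then seal it
    // read-only.
    #include <cstring>

    #include "base/memory/platform_shared_memory_region.h"

    using base::subtle::PlatformSharedMemoryRegion;

    bool CreateFillAndSeal() {
      PlatformSharedMemoryRegion region =
          PlatformSharedMemoryRegion::CreateWritable(4096);
      if (!region.IsValid())
        return false;

      void* memory = nullptr;
      size_t mapped_size = 0;
      // Offset 0 trivially satisfies the VMAllocationGranularity() rule.
      if (!region.MapAt(0, 4096, &memory, &mapped_size))
        return false;

      memset(memory, 0, mapped_size);     // write while still kWritable
      return region.ConvertToReadOnly();  // demote; mode is now kReadOnly
    }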
diff --git a/security/sandbox/chromium/base/memory/platform_shared_memory_region_win.cc b/security/sandbox/chromium/base/memory/platform_shared_memory_region_win.cc
new file mode 100644
index 0000000000..c2f3704f91
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/platform_shared_memory_region_win.cc
@@ -0,0 +1,343 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <aclapi.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/bits.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+namespace subtle {
+
+namespace {
+
+// Emits UMA metrics about encountered errors. Pass zero (0) for |winerror|
+// if there is no associated Windows error.
+void LogError(PlatformSharedMemoryRegion::CreateError error, DWORD winerror) {
+ UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error);
+ static_assert(ERROR_SUCCESS == 0, "Windows error code changed!");
+ if (winerror != ERROR_SUCCESS)
+ UmaHistogramSparse("SharedMemory.CreateWinError", winerror);
+}
+
+typedef enum _SECTION_INFORMATION_CLASS {
+ SectionBasicInformation,
+} SECTION_INFORMATION_CLASS;
+
+typedef struct _SECTION_BASIC_INFORMATION {
+ PVOID BaseAddress;
+ ULONG Attributes;
+ LARGE_INTEGER Size;
+} SECTION_BASIC_INFORMATION, *PSECTION_BASIC_INFORMATION;
+
+typedef ULONG(__stdcall* NtQuerySectionType)(
+ HANDLE SectionHandle,
+ SECTION_INFORMATION_CLASS SectionInformationClass,
+ PVOID SectionInformation,
+ ULONG SectionInformationLength,
+ PULONG ResultLength);
+
+// Returns the length of the memory section starting at the supplied address.
+size_t GetMemorySectionSize(void* address) {
+ MEMORY_BASIC_INFORMATION memory_info;
+ if (!::VirtualQuery(address, &memory_info, sizeof(memory_info)))
+ return 0;
+ return memory_info.RegionSize -
+ (static_cast<char*>(address) -
+ static_cast<char*>(memory_info.AllocationBase));
+}
+
+// Checks if the section object is safe to map. At the moment this just means
+// it's not an image section.
+bool IsSectionSafeToMap(HANDLE handle) {
+ static NtQuerySectionType nt_query_section_func =
+ reinterpret_cast<NtQuerySectionType>(
+ ::GetProcAddress(::GetModuleHandle(L"ntdll.dll"), "NtQuerySection"));
+ DCHECK(nt_query_section_func);
+
+ // The handle must have SECTION_QUERY access for this to succeed.
+ SECTION_BASIC_INFORMATION basic_information = {};
+ ULONG status =
+ nt_query_section_func(handle, SectionBasicInformation, &basic_information,
+ sizeof(basic_information), nullptr);
+ if (status)
+ return false;
+ return (basic_information.Attributes & SEC_IMAGE) != SEC_IMAGE;
+}
+
+// Returns a HANDLE on success and |nullptr| on failure.
+// This function is similar to CreateFileMapping, but removes the permissions
+// WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE.
+//
+// A newly created file mapping has two sets of permissions. It has access
+// control permissions (WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE) and
+// file permissions (FILE_MAP_READ, FILE_MAP_WRITE, etc.). The Chrome sandbox
+// prevents HANDLEs with the WRITE_DAC permission from being duplicated into
+// unprivileged processes.
+//
+// In order to remove the access control permissions, after being created the
+// handle is duplicated with only the file access permissions.
+HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
+ size_t rounded_size,
+ LPCWSTR name) {
+ HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
+ static_cast<DWORD>(rounded_size), name);
+ if (!h) {
+ LogError(
+ PlatformSharedMemoryRegion::CreateError::CREATE_FILE_MAPPING_FAILURE,
+ GetLastError());
+ return nullptr;
+ }
+
+ HANDLE h2;
+ ProcessHandle process = GetCurrentProcess();
+ BOOL success = ::DuplicateHandle(
+ process, h, process, &h2, FILE_MAP_READ | FILE_MAP_WRITE | SECTION_QUERY,
+ FALSE, 0);
+ BOOL rv = ::CloseHandle(h);
+ DCHECK(rv);
+
+ if (!success) {
+ LogError(
+ PlatformSharedMemoryRegion::CreateError::REDUCE_PERMISSIONS_FAILURE,
+ GetLastError());
+ return nullptr;
+ }
+
+ return h2;
+}
+
+} // namespace
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+ win::ScopedHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid) {
+ if (!handle.IsValid())
+ return {};
+
+ if (size == 0)
+ return {};
+
+ if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+ return {};
+
+ if (!IsSectionSafeToMap(handle.Get()))
+ return {};
+
+ CHECK(
+ CheckPlatformHandlePermissionsCorrespondToMode(handle.Get(), mode, size));
+
+ return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+HANDLE PlatformSharedMemoryRegion::GetPlatformHandle() const {
+ return handle_.Get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+ return handle_.IsValid();
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+ if (!IsValid())
+ return {};
+
+ CHECK_NE(mode_, Mode::kWritable)
+ << "Duplicating a writable shared memory region is prohibited";
+
+ HANDLE duped_handle;
+ ProcessHandle process = GetCurrentProcess();
+ BOOL success =
+ ::DuplicateHandle(process, handle_.Get(), process, &duped_handle, 0,
+ FALSE, DUPLICATE_SAME_ACCESS);
+ if (!success)
+ return {};
+
+ return PlatformSharedMemoryRegion(win::ScopedHandle(duped_handle), mode_,
+ size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+ if (!IsValid())
+ return false;
+
+ CHECK_EQ(mode_, Mode::kWritable)
+ << "Only writable shared memory region can be converted to read-only";
+
+ win::ScopedHandle handle_copy(handle_.Take());
+
+ HANDLE duped_handle;
+ ProcessHandle process = GetCurrentProcess();
+ BOOL success =
+ ::DuplicateHandle(process, handle_copy.Get(), process, &duped_handle,
+ FILE_MAP_READ | SECTION_QUERY, FALSE, 0);
+ if (!success)
+ return false;
+
+ handle_.Set(duped_handle);
+ mode_ = Mode::kReadOnly;
+ return true;
+}
+
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+ if (!IsValid())
+ return false;
+
+ CHECK_EQ(mode_, Mode::kWritable)
+ << "Only writable shared memory region can be converted to unsafe";
+
+ mode_ = Mode::kUnsafe;
+ return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
+ size_t size,
+ void** memory,
+ size_t* mapped_size) const {
+ bool write_allowed = mode_ != Mode::kReadOnly;
+  // Try to map the shared memory. On the first failure, release any reserved
+  // address space for a single retry.
+ for (int i = 0; i < 2; ++i) {
+ *memory = MapViewOfFile(
+ handle_.Get(), FILE_MAP_READ | (write_allowed ? FILE_MAP_WRITE : 0),
+ static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), size);
+ if (*memory)
+ break;
+ ReleaseReservation();
+ }
+ if (!*memory) {
+ DPLOG(ERROR) << "Failed executing MapViewOfFile";
+ return false;
+ }
+
+ *mapped_size = GetMemorySectionSize(*memory);
+ return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+ size_t size) {
+  // TODO(crbug.com/210609): NaCl forces us to round up to 64k here, wasting
+  // 32k per mapping on average.
+ static const size_t kSectionSize = 65536;
+ if (size == 0) {
+ LogError(CreateError::SIZE_ZERO, 0);
+ return {};
+ }
+
+ // Aligning may overflow so check that the result doesn't decrease.
+ size_t rounded_size = bits::Align(size, kSectionSize);
+ if (rounded_size < size ||
+ rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
+ LogError(CreateError::SIZE_TOO_LARGE, 0);
+ return {};
+ }
+
+ CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+ "lead to this region being non-modifiable";
+
+ // Add an empty DACL to enforce anonymous read-only sections.
+ ACL dacl;
+ SECURITY_DESCRIPTOR sd;
+ if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
+ LogError(CreateError::INITIALIZE_ACL_FAILURE, GetLastError());
+ return {};
+ }
+ if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
+ LogError(CreateError::INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
+ return {};
+ }
+ if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
+ LogError(CreateError::SET_SECURITY_DESC_FAILURE, GetLastError());
+ return {};
+ }
+
+ string16 name;
+ if (win::GetVersion() < win::Version::WIN8_1) {
+ // Windows < 8.1 ignores DACLs on certain unnamed objects (like shared
+ // sections). So, we generate a random name when we need to enforce
+ // read-only.
+ uint64_t rand_values[4];
+ RandBytes(&rand_values, sizeof(rand_values));
+ name = ASCIIToUTF16(StringPrintf("CrSharedMem_%016llx%016llx%016llx%016llx",
+ rand_values[0], rand_values[1],
+ rand_values[2], rand_values[3]));
+ DCHECK(!name.empty());
+ }
+
+ SECURITY_ATTRIBUTES sa = {sizeof(sa), &sd, FALSE};
+  // Ask for the file mapping with reduced permissions to avoid passing the
+  // access control permissions granted by default into an unprivileged
+  // process.
+ HANDLE h = CreateFileMappingWithReducedPermissions(
+ &sa, rounded_size, name.empty() ? nullptr : as_wcstr(name));
+ if (h == nullptr) {
+ // The error is logged within CreateFileMappingWithReducedPermissions().
+ return {};
+ }
+
+ win::ScopedHandle scoped_h(h);
+ // Check if the shared memory pre-exists.
+ if (GetLastError() == ERROR_ALREADY_EXISTS) {
+ LogError(CreateError::ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
+ return {};
+ }
+
+ LogError(CreateError::SUCCESS, ERROR_SUCCESS);
+ return PlatformSharedMemoryRegion(std::move(scoped_h), mode, size,
+ UnguessableToken::Create());
+}
+
+// static
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+ PlatformHandle handle,
+ Mode mode,
+ size_t size) {
+ // Call ::DuplicateHandle() with FILE_MAP_WRITE as a desired access to check
+ // if the |handle| has a write access.
+ ProcessHandle process = GetCurrentProcess();
+ HANDLE duped_handle;
+ BOOL success = ::DuplicateHandle(process, handle, process, &duped_handle,
+ FILE_MAP_WRITE, FALSE, 0);
+ if (success) {
+ BOOL rv = ::CloseHandle(duped_handle);
+ DCHECK(rv);
+ }
+
+ bool is_read_only = !success;
+ bool expected_read_only = mode == Mode::kReadOnly;
+
+ if (is_read_only != expected_read_only) {
+ DLOG(ERROR) << "File mapping handle has wrong access rights: it is"
+ << (is_read_only ? " " : " not ") << "read-only but it should"
+ << (expected_read_only ? " " : " not ") << "be";
+ return false;
+ }
+
+ return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+ win::ScopedHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid)
+ : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+} // namespace subtle
+} // namespace base
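
The permission check above relies on a Windows idiom worth calling out: a section handle's effective access cannot be queried directly, but it can be probed by asking DuplicateHandle() for FILE_MAP_WRITE. A standalone sketch of that probe (illustrative; the function name is made up):

    #include <windows.h>

    // Returns true if |section| grants write access; a failed duplication
    // with FILE_MAP_WRITE means the handle is effectively read-only.
    bool SectionHandleIsWritable(HANDLE section) {
      HANDLE duped = nullptr;
      BOOL ok = ::DuplicateHandle(::GetCurrentProcess(), section,
                                  ::GetCurrentProcess(), &duped,
                                  FILE_MAP_WRITE, FALSE, 0);
      if (ok)
        ::CloseHandle(duped);
      return ok != FALSE;
    }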
diff --git a/security/sandbox/chromium/base/memory/ptr_util.h b/security/sandbox/chromium/base/memory/ptr_util.h
new file mode 100644
index 0000000000..42f4f49eeb
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/ptr_util.h
@@ -0,0 +1,23 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PTR_UTIL_H_
+#define BASE_MEMORY_PTR_UTIL_H_
+
+#include <memory>
+#include <utility>
+
+namespace base {
+
+// Helper to transfer ownership of a raw pointer to a std::unique_ptr<T>.
+// Note that std::unique_ptr<T> has very different semantics from
+// std::unique_ptr<T[]>: do not use this helper for array allocations.
+template <typename T>
+std::unique_ptr<T> WrapUnique(T* ptr) {
+ return std::unique_ptr<T>(ptr);
+}
+
+} // namespace base
+
+#endif // BASE_MEMORY_PTR_UTIL_H_
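
A sketch of the case WrapUnique() exists for (illustrative; the Widget class is hypothetical): std::make_unique cannot reach a private constructor, but a static factory can:

    #include <memory>

    #include "base/memory/ptr_util.h"

    class Widget {
     public:
      static std::unique_ptr<Widget> Create() {
        // make_unique can't call the private constructor; WrapUnique can
        // take ownership of the raw pointer created here.
        return base::WrapUnique(new Widget());
      }

     private:
      Widget() = default;
    };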
diff --git a/security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h b/security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h
new file mode 100644
index 0000000000..ab8b2abcbb
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
+#define BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
+
+#include <type_traits>
+
+#include "base/template_util.h"
+
+// It is dangerous to post a task with a T* argument where T is a subtype of
+// RefCounted(Base|ThreadSafeBase), since by the time the parameter is used, the
+// object may already have been deleted since it was not held with a
+// scoped_refptr. Example: http://crbug.com/27191
+// The following set of traits are designed to generate a compile error
+// whenever this antipattern is attempted.
+
+namespace base {
+
+// This is a base internal implementation file used by task.h and callback.h.
+// Not for public consumption, so we wrap it in namespace internal.
+namespace internal {
+
+template <typename T, typename = void>
+struct IsRefCountedType : std::false_type {};
+
+template <typename T>
+struct IsRefCountedType<T,
+ void_t<decltype(std::declval<T*>()->AddRef()),
+ decltype(std::declval<T*>()->Release())>>
+ : std::true_type {};
+
+template <typename T>
+struct NeedsScopedRefptrButGetsRawPtr {
+ static_assert(!std::is_reference<T>::value,
+ "NeedsScopedRefptrButGetsRawPtr requires non-reference type.");
+
+ enum {
+ // Human readable translation: you needed to be a scoped_refptr if you are a
+ // raw pointer type and are convertible to a RefCounted(Base|ThreadSafeBase)
+ // type.
+ value = std::is_pointer<T>::value &&
+ IsRefCountedType<std::remove_pointer_t<T>>::value
+ };
+};
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
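
A compile-time sketch of the trait in action (illustrative; Plain and Counted are hypothetical test types): a raw pointer to a ref-counted type trips the check, while a pointer to an ordinary type does not:

    #include "base/memory/raw_scoped_refptr_mismatch_checker.h"
    #include "base/memory/ref_counted.h"

    struct Plain {};

    struct Counted : public base::RefCounted<Counted> {
     private:
      friend class base::RefCounted<Counted>;
      ~Counted() = default;
    };

    // Plain* is fine; Counted* should have been a scoped_refptr<Counted>.
    static_assert(
        !base::internal::NeedsScopedRefptrButGetsRawPtr<Plain*>::value, "");
    static_assert(
        base::internal::NeedsScopedRefptrButGetsRawPtr<Counted*>::value, "");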
diff --git a/security/sandbox/chromium/base/memory/ref_counted.cc b/security/sandbox/chromium/base/memory/ref_counted.cc
new file mode 100644
index 0000000000..0a8d32ebf0
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/ref_counted.cc
@@ -0,0 +1,105 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+
+#include <limits>
+#include <type_traits>
+
+#include "base/threading/thread_collision_warner.h"
+
+namespace base {
+namespace {
+
+#if DCHECK_IS_ON()
+std::atomic_int g_cross_thread_ref_count_access_allow_count(0);
+#endif
+
+} // namespace
+
+namespace subtle {
+
+bool RefCountedThreadSafeBase::HasOneRef() const {
+ return ref_count_.IsOne();
+}
+
+bool RefCountedThreadSafeBase::HasAtLeastOneRef() const {
+ return !ref_count_.IsZero();
+}
+
+#if DCHECK_IS_ON()
+RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
+ DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
+ "calling Release()";
+}
+#endif
+
+// For security and correctness, we check the arithmetic on ref counts.
+//
+// In an attempt to avoid binary bloat (from inlining the `CHECK`), we define
+// these functions out-of-line. However, compilers are wily. Further testing may
+// show that `NOINLINE` helps or hurts.
+//
+#if defined(ARCH_CPU_64_BITS)
+void RefCountedBase::AddRefImpl() const {
+ // An attacker could induce use-after-free bugs, and potentially exploit them,
+ // by creating so many references to a ref-counted object that the reference
+ // count overflows. On 32-bit architectures, there is not enough address space
+ // to succeed. But on 64-bit architectures, it might indeed be possible.
+ // Therefore, we can elide the check for arithmetic overflow on 32-bit, but we
+ // must check on 64-bit.
+ //
+ // Make sure the addition didn't wrap back around to 0. This form of check
+ // works because we assert that `ref_count_` is an unsigned integer type.
+ CHECK(++ref_count_ != 0);
+}
+
+void RefCountedBase::ReleaseImpl() const {
+ // Make sure the subtraction didn't wrap back around from 0 to the max value.
+ // That could cause memory leaks, and may induce application-semantic
+ // correctness or safety bugs. (E.g. what if we really needed that object to
+ // be destroyed at the right time?)
+ //
+ // Note that unlike with overflow, underflow could also happen on 32-bit
+  // architectures. Arguably, we should do this check on 32-bit machines too.
+ CHECK(--ref_count_ != std::numeric_limits<decltype(ref_count_)>::max());
+}
+#endif
+
+#if !defined(ARCH_CPU_X86_FAMILY)
+bool RefCountedThreadSafeBase::Release() const {
+ return ReleaseImpl();
+}
+void RefCountedThreadSafeBase::AddRef() const {
+ AddRefImpl();
+}
+void RefCountedThreadSafeBase::AddRefWithCheck() const {
+ AddRefWithCheckImpl();
+}
+#endif
+
+#if DCHECK_IS_ON()
+bool RefCountedBase::CalledOnValidSequence() const {
+#if defined(MOZ_SANDBOX)
+ return true;
+#else
+ return sequence_checker_.CalledOnValidSequence() ||
+ g_cross_thread_ref_count_access_allow_count.load() != 0;
+#endif
+}
+#endif
+
+} // namespace subtle
+
+#if DCHECK_IS_ON()
+ScopedAllowCrossThreadRefCountAccess::ScopedAllowCrossThreadRefCountAccess() {
+ ++g_cross_thread_ref_count_access_allow_count;
+}
+
+ScopedAllowCrossThreadRefCountAccess::~ScopedAllowCrossThreadRefCountAccess() {
+ --g_cross_thread_ref_count_access_allow_count;
+}
+#endif
+
+} // namespace base
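
The 64-bit checks above lean on unsigned wraparound being well-defined in C++, so the sentinel comparisons are reliable. A standalone illustration (not part of the commit; the function name is made up):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    void WraparoundDemo() {
      uint32_t count = std::numeric_limits<uint32_t>::max();
      ++count;   // overflow wraps to 0: what CHECK(++ref_count_ != 0) traps
      assert(count == 0);
      --count;   // underflow wraps back to max: what ReleaseImpl() traps
      assert(count == std::numeric_limits<uint32_t>::max());
    }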
diff --git a/security/sandbox/chromium/base/memory/ref_counted.h b/security/sandbox/chromium/base/memory/ref_counted.h
new file mode 100644
index 0000000000..ac7183a49d
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/ref_counted.h
@@ -0,0 +1,463 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_H_
+#define BASE_MEMORY_REF_COUNTED_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/atomic_ref_count.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/sequence_checker.h"
+#include "base/threading/thread_collision_warner.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace subtle {
+
+class BASE_EXPORT RefCountedBase {
+ public:
+ bool HasOneRef() const { return ref_count_ == 1; }
+ bool HasAtLeastOneRef() const { return ref_count_ >= 1; }
+
+ protected:
+ explicit RefCountedBase(StartRefCountFromZeroTag) {
+#if DCHECK_IS_ON()
+ sequence_checker_.DetachFromSequence();
+#endif
+ }
+
+ explicit RefCountedBase(StartRefCountFromOneTag) : ref_count_(1) {
+#if DCHECK_IS_ON()
+ needs_adopt_ref_ = true;
+ sequence_checker_.DetachFromSequence();
+#endif
+ }
+
+ ~RefCountedBase() {
+#if DCHECK_IS_ON()
+ DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
+#endif
+ }
+
+ void AddRef() const {
+ // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+ // Current thread books the critical section "AddRelease"
+ // without release it.
+ // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+#if DCHECK_IS_ON()
+ DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such a object has to be made by AdoptRef or"
+ << " MakeRefCounted.";
+ if (ref_count_ >= 1) {
+ DCHECK(CalledOnValidSequence());
+ }
+#endif
+
+ AddRefImpl();
+ }
+
+ // Returns true if the object should self-delete.
+ bool Release() const {
+ ReleaseImpl();
+
+ // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+ // Current thread books the critical section "AddRelease"
+ // without release it.
+ // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+
+#if DCHECK_IS_ON()
+ DCHECK(!in_dtor_);
+ if (ref_count_ == 0)
+ in_dtor_ = true;
+
+ if (ref_count_ >= 1)
+ DCHECK(CalledOnValidSequence());
+ if (ref_count_ == 1)
+ sequence_checker_.DetachFromSequence();
+#endif
+
+ return ref_count_ == 0;
+ }
+
+ // Returns true if it is safe to read or write the object, from a thread
+ // safety standpoint. Should be DCHECK'd from the methods of RefCounted
+ // classes if there is a danger of objects being shared across threads.
+ //
+ // This produces fewer false positives than adding a separate SequenceChecker
+ // into the subclass, because it automatically detaches from the sequence when
+ // the reference count is 1 (and never fails if there is only one reference).
+ //
+ // This means unlike a separate SequenceChecker, it will permit a singly
+ // referenced object to be passed between threads (not holding a reference on
+ // the sending thread), but will trap if the sending thread holds onto a
+ // reference, or if the object is accessed from multiple threads
+ // simultaneously.
+ bool IsOnValidSequence() const {
+#if DCHECK_IS_ON()
+ return ref_count_ <= 1 || CalledOnValidSequence();
+#else
+ return true;
+#endif
+ }
+
+ private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ FRIEND_TEST_ALL_PREFIXES(RefCountedDeathTest, TestOverflowCheck);
+
+ void Adopted() const {
+#if DCHECK_IS_ON()
+ DCHECK(needs_adopt_ref_);
+ needs_adopt_ref_ = false;
+#endif
+ }
+
+#if defined(ARCH_CPU_64_BITS)
+ void AddRefImpl() const;
+ void ReleaseImpl() const;
+#else
+ void AddRefImpl() const { ++ref_count_; }
+ void ReleaseImpl() const { --ref_count_; }
+#endif
+
+#if DCHECK_IS_ON()
+ bool CalledOnValidSequence() const;
+#endif
+
+ mutable uint32_t ref_count_ = 0;
+ static_assert(std::is_unsigned<decltype(ref_count_)>::value,
+ "ref_count_ must be an unsigned type.");
+
+#if DCHECK_IS_ON()
+ mutable bool needs_adopt_ref_ = false;
+ mutable bool in_dtor_ = false;
+ mutable SequenceChecker sequence_checker_;
+#endif
+
+ DFAKE_MUTEX(add_release_);
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedBase);
+};
+
+class BASE_EXPORT RefCountedThreadSafeBase {
+ public:
+ bool HasOneRef() const;
+ bool HasAtLeastOneRef() const;
+
+ protected:
+ explicit constexpr RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
+ explicit constexpr RefCountedThreadSafeBase(StartRefCountFromOneTag)
+ : ref_count_(1) {
+#if DCHECK_IS_ON()
+ needs_adopt_ref_ = true;
+#endif
+ }
+
+#if DCHECK_IS_ON()
+ ~RefCountedThreadSafeBase();
+#else
+ ~RefCountedThreadSafeBase() = default;
+#endif
+
+// Release and AddRef are suitable for inlining on X86 because they generate
+// very small code sequences. On other platforms (ARM), it causes a size
+// regression and is probably not worth it.
+#if defined(ARCH_CPU_X86_FAMILY)
+ // Returns true if the object should self-delete.
+ bool Release() const { return ReleaseImpl(); }
+ void AddRef() const { AddRefImpl(); }
+ void AddRefWithCheck() const { AddRefWithCheckImpl(); }
+#else
+ // Returns true if the object should self-delete.
+ bool Release() const;
+ void AddRef() const;
+ void AddRefWithCheck() const;
+#endif
+
+ private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ void Adopted() const {
+#if DCHECK_IS_ON()
+ DCHECK(needs_adopt_ref_);
+ needs_adopt_ref_ = false;
+#endif
+ }
+
+ ALWAYS_INLINE void AddRefImpl() const {
+#if DCHECK_IS_ON()
+ DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such a object has to be made by AdoptRef or"
+ << " MakeRefCounted.";
+#endif
+ ref_count_.Increment();
+ }
+
+ ALWAYS_INLINE void AddRefWithCheckImpl() const {
+#if DCHECK_IS_ON()
+ DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such a object has to be made by AdoptRef or"
+ << " MakeRefCounted.";
+#endif
+ CHECK(ref_count_.Increment() > 0);
+ }
+
+ ALWAYS_INLINE bool ReleaseImpl() const {
+#if DCHECK_IS_ON()
+ DCHECK(!in_dtor_);
+ DCHECK(!ref_count_.IsZero());
+#endif
+ if (!ref_count_.Decrement()) {
+#if DCHECK_IS_ON()
+ in_dtor_ = true;
+#endif
+ return true;
+ }
+ return false;
+ }
+
+ mutable AtomicRefCount ref_count_{0};
+#if DCHECK_IS_ON()
+ mutable bool needs_adopt_ref_ = false;
+ mutable bool in_dtor_ = false;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase);
+};
+
+} // namespace subtle
+
+// ScopedAllowCrossThreadRefCountAccess disables the check documented on
+// RefCounted below for rare pre-existing use cases where thread-safety was
+// guaranteed through other means (e.g. explicit sequencing of calls across
+// execution sequences when bouncing between threads in order). New callers
+// should refrain from using this (callsites handling thread-safety through
+// locks should use RefCountedThreadSafe, since the overhead of its atomics is
+// negligible compared to locks anyway, and callsites doing explicit sequencing
+// should properly std::move() the ref to avoid hitting this check).
+// TODO(tzik): Cleanup existing use cases and remove
+// ScopedAllowCrossThreadRefCountAccess.
+class BASE_EXPORT ScopedAllowCrossThreadRefCountAccess final {
+ public:
+#if DCHECK_IS_ON()
+ ScopedAllowCrossThreadRefCountAccess();
+ ~ScopedAllowCrossThreadRefCountAccess();
+#else
+ ScopedAllowCrossThreadRefCountAccess() {}
+ ~ScopedAllowCrossThreadRefCountAccess() {}
+#endif
+};
+
+//
+// A base class for reference counted classes. Otherwise, known as a cheap
+// knock-off of WebKit's RefCounted<T> class. To use this, just extend your
+// class from it like so:
+//
+// class MyFoo : public base::RefCounted<MyFoo> {
+// ...
+// private:
+// friend class base::RefCounted<MyFoo>;
+// ~MyFoo();
+// };
+//
+// You should always make your destructor non-public, to avoid any code deleting
+// the object accidentally while there are references to it.
+//
+//
+// The ref count manipulation in RefCounted is NOT thread safe and has DCHECKs
+// to trap unsafe cross-thread usage. A subclass instance of RefCounted can be
+// passed to another execution sequence only when its ref count is 1. If the ref
+// count is more than 1, the RefCounted class verifies the ref updates are made
+// on the same execution sequence as the previous ones. The subclass can also
+// manually call IsOnValidSequence to trap other non-thread-safe accesses; see
+// the documentation for that method.
+//
+//
+// The reference count starts from zero by default, and we intend to migrate
+// to a start-from-one ref count. Put REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() in
+// the ref-counted class to opt in.
+//
+// If an object has a start-from-one ref count, the first scoped_refptr needs
+// to be created by base::AdoptRef() or base::MakeRefCounted(). We can use
+// base::MakeRefCounted() to create both types of ref-counted objects.
+//
+// The motivations to use start-from-one ref count are:
+// - Start-from-one ref count doesn't need the ref count increment for the
+// first reference.
+// - It can detect an invalid object acquisition for a being-deleted object
+//   that has a zero ref count. That tends to happen with a custom deleter
+//   that delays the deletion.
+// TODO(tzik): Implement invalid acquisition detection.
+//  - Behavior parity with Blink's WTF::RefCounted, whose count starts from
+//    one. A start-from-one ref count is a step toward merging WTF::RefCounted
+//    into base::RefCounted.
+//
+#define REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() \
+ static constexpr ::base::subtle::StartRefCountFromOneTag \
+ kRefCountPreference = ::base::subtle::kStartRefCountFromOneTag
+
+template <class T, typename Traits>
+class RefCounted;
+
+template <typename T>
+struct DefaultRefCountedTraits {
+ static void Destruct(const T* x) {
+ RefCounted<T, DefaultRefCountedTraits>::DeleteInternal(x);
+ }
+};
+
+template <class T, typename Traits = DefaultRefCountedTraits<T>>
+class RefCounted : public subtle::RefCountedBase {
+ public:
+ static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+ subtle::kStartRefCountFromZeroTag;
+
+ RefCounted() : subtle::RefCountedBase(T::kRefCountPreference) {}
+
+ void AddRef() const {
+ subtle::RefCountedBase::AddRef();
+ }
+
+ void Release() const {
+ if (subtle::RefCountedBase::Release()) {
+ // Prune the code paths which the static analyzer may take to simulate
+ // object destruction. Use-after-free errors aren't possible given the
+ // lifetime guarantees of the refcounting system.
+ ANALYZER_SKIP_THIS_PATH();
+
+ Traits::Destruct(static_cast<const T*>(this));
+ }
+ }
+
+ protected:
+ ~RefCounted() = default;
+
+ private:
+ friend struct DefaultRefCountedTraits<T>;
+ template <typename U>
+ static void DeleteInternal(const U* x) {
+ delete x;
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(RefCounted);
+};
+
+// Forward declaration.
+template <class T, typename Traits> class RefCountedThreadSafe;
+
+// Default traits for RefCountedThreadSafe<T>. Deletes the object when its ref
+// count reaches 0. Overload to delete it on a different thread etc.
+template<typename T>
+struct DefaultRefCountedThreadSafeTraits {
+ static void Destruct(const T* x) {
+    // Delete through RefCountedThreadSafe so that child classes only need to
+    // befriend RefCountedThreadSafe instead of this struct, which is an
+    // implementation detail.
+ RefCountedThreadSafe<T,
+ DefaultRefCountedThreadSafeTraits>::DeleteInternal(x);
+ }
+};
+
+//
+// A thread-safe variant of RefCounted<T>
+//
+// class MyFoo : public base::RefCountedThreadSafe<MyFoo> {
+// ...
+// };
+//
+// If you're using the default trait, then you should add compile time
+// asserts that no one else is deleting your object. i.e.
+// private:
+// friend class base::RefCountedThreadSafe<MyFoo>;
+// ~MyFoo();
+//
+// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
+// too. See the comment above the RefCounted definition for details.
+template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
+class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
+ public:
+ static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+ subtle::kStartRefCountFromZeroTag;
+
+ explicit RefCountedThreadSafe()
+ : subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
+
+ void AddRef() const { AddRefImpl(T::kRefCountPreference); }
+
+ void Release() const {
+ if (subtle::RefCountedThreadSafeBase::Release()) {
+ ANALYZER_SKIP_THIS_PATH();
+ Traits::Destruct(static_cast<const T*>(this));
+ }
+ }
+
+ protected:
+ ~RefCountedThreadSafe() = default;
+
+ private:
+ friend struct DefaultRefCountedThreadSafeTraits<T>;
+ template <typename U>
+ static void DeleteInternal(const U* x) {
+ delete x;
+ }
+
+ void AddRefImpl(subtle::StartRefCountFromZeroTag) const {
+ subtle::RefCountedThreadSafeBase::AddRef();
+ }
+
+ void AddRefImpl(subtle::StartRefCountFromOneTag) const {
+ subtle::RefCountedThreadSafeBase::AddRefWithCheck();
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafe);
+};
+
+//
+// A thread-safe wrapper for some piece of data so we can place other
+// things in scoped_refptrs<>.
+//
+template<typename T>
+class RefCountedData
+ : public base::RefCountedThreadSafe< base::RefCountedData<T> > {
+ public:
+ RefCountedData() : data() {}
+ RefCountedData(const T& in_value) : data(in_value) {}
+ RefCountedData(T&& in_value) : data(std::move(in_value)) {}
+
+ T data;
+
+ private:
+ friend class base::RefCountedThreadSafe<base::RefCountedData<T> >;
+ ~RefCountedData() = default;
+};
+
+template <typename T>
+bool operator==(const RefCountedData<T>& lhs, const RefCountedData<T>& rhs) {
+ return lhs.data == rhs.data;
+}
+
+template <typename T>
+bool operator!=(const RefCountedData<T>& lhs, const RefCountedData<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+} // namespace base
+
+#endif // BASE_MEMORY_REF_COUNTED_H_
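
To tie the header together, a hedged usage sketch (illustrative; the Config class and Example function are hypothetical) of a start-from-one, thread-safe ref-counted type:

    #include "base/memory/ref_counted.h"
    #include "base/memory/scoped_refptr.h"

    class Config : public base::RefCountedThreadSafe<Config> {
     public:
      REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
      Config() = default;

     private:
      friend class base::RefCountedThreadSafe<Config>;
      ~Config() = default;
    };

    void Example() {
      // MakeRefCounted() performs the required AdoptRef() for the first ref.
      scoped_refptr<Config> config = base::MakeRefCounted<Config>();
      scoped_refptr<Config> alias = config;  // ref count is now 2
    }  // both refs drop here and the object deletes itself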
diff --git a/security/sandbox/chromium/base/memory/scoped_refptr.h b/security/sandbox/chromium/base/memory/scoped_refptr.h
new file mode 100644
index 0000000000..238b61a736
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/scoped_refptr.h
@@ -0,0 +1,375 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SCOPED_REFPTR_H_
+#define BASE_MEMORY_SCOPED_REFPTR_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <type_traits>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+template <class T>
+class scoped_refptr;
+
+namespace base {
+
+template <class, typename>
+class RefCounted;
+template <class, typename>
+class RefCountedThreadSafe;
+class SequencedTaskRunner;
+class WrappedPromise;
+
+template <typename T>
+scoped_refptr<T> AdoptRef(T* t);
+
+namespace internal {
+
+class BasePromise;
+
+} // namespace internal
+
+namespace subtle {
+
+enum AdoptRefTag { kAdoptRefTag };
+enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
+enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(const T*,
+ const RefCounted<U, V>*) {
+ return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
+ std::decay_t<decltype(U::kRefCountPreference)>>::value;
+}
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(
+ const T*,
+ const RefCountedThreadSafe<U, V>*) {
+ return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
+ std::decay_t<decltype(U::kRefCountPreference)>>::value;
+}
+
+constexpr bool IsRefCountPreferenceOverridden(...) {
+ return false;
+}
+
+} // namespace subtle
+
+// Creates a scoped_refptr from a raw pointer without incrementing the reference
+// count. Use this only for a newly created object whose reference count starts
+// from 1 instead of 0.
+template <typename T>
+scoped_refptr<T> AdoptRef(T* obj) {
+ using Tag = std::decay_t<decltype(T::kRefCountPreference)>;
+ static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
+ "Use AdoptRef only if the reference count starts from one.");
+
+ DCHECK(obj);
+ DCHECK(obj->HasOneRef());
+ obj->Adopted();
+ return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
+}
+
+namespace subtle {
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
+ return scoped_refptr<T>(obj);
+}
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
+ return AdoptRef(obj);
+}
+
+} // namespace subtle
+
+// Constructs an instance of T, which is a ref counted type, and wraps the
+// object into a scoped_refptr<T>.
+template <typename T, typename... Args>
+scoped_refptr<T> MakeRefCounted(Args&&... args) {
+ T* obj = new T(std::forward<Args>(args)...);
+ return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
+}
+
+// Takes an instance of T, which is a ref counted type, and wraps the object
+// into a scoped_refptr<T>.
+template <typename T>
+scoped_refptr<T> WrapRefCounted(T* t) {
+ return scoped_refptr<T>(t);
+}
+
+} // namespace base
+
+//
+// A smart pointer class for reference counted objects. Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference. Sample usage:
+//
+// class MyFoo : public RefCounted<MyFoo> {
+// ...
+// private:
+// friend class RefCounted<MyFoo>; // Allow destruction by RefCounted<>.
+// ~MyFoo(); // Destructor must be private/protected.
+// };
+//
+// void some_function() {
+// scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+// foo->Method(param);
+// // |foo| is released when this function returns
+// }
+//
+// void some_other_function() {
+// scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+// ...
+// foo.reset(); // explicitly releases |foo|
+// ...
+// if (foo)
+// foo->Method(param);
+// }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+// {
+// scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+// scoped_refptr<MyFoo> b;
+//
+// b.swap(a);
+// // now, |b| references the MyFoo object, and |a| references nullptr.
+// }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+// {
+// scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+// scoped_refptr<MyFoo> b;
+//
+// b = a;
+// // now, |a| and |b| each own a reference to the same MyFoo object.
+// }
+//
+// Also see Chromium's ownership and calling conventions:
+// https://chromium.googlesource.com/chromium/src/+/lkgr/styleguide/c++/c++.md#object-ownership-and-calling-conventions
+// Specifically:
+// If the function (at least sometimes) takes a ref on a refcounted object,
+// declare the param as scoped_refptr<T>. The caller can decide whether it
+// wishes to transfer ownership (by calling std::move(t) when passing t) or
+// retain its ref (by simply passing t directly).
+// In other words, use scoped_refptr like you would a std::unique_ptr except
+// in the odd case where it's required to hold on to a ref while handing one
+// to another component (if a component merely needs to use t on the stack
+// without keeping a ref: pass t as a raw T*).
+template <class T>
+class scoped_refptr {
+ public:
+ typedef T element_type;
+
+ constexpr scoped_refptr() = default;
+
+ // Allow implicit construction from nullptr.
+ constexpr scoped_refptr(std::nullptr_t) {}
+
+ // Constructs from a raw pointer. Note that this constructor allows implicit
+ // conversion from T* to scoped_refptr<T> which is strongly discouraged. If
+ // you are creating a new ref-counted object please use
+ // base::MakeRefCounted<T>() or base::WrapRefCounted<T>(). Otherwise you
+ // should move or copy construct from an existing scoped_refptr<T> to the
+ // ref-counted object.
+ scoped_refptr(T* p) : ptr_(p) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+
+ // Copy constructor. This is required in addition to the copy conversion
+ // constructor below.
+ scoped_refptr(const scoped_refptr& r) : scoped_refptr(r.ptr_) {}
+
+ // Copy conversion constructor.
+ template <typename U,
+ typename = typename std::enable_if<
+ std::is_convertible<U*, T*>::value>::type>
+ scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
+
+ // Move constructor. This is required in addition to the move conversion
+ // constructor below.
+ scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.ptr_) { r.ptr_ = nullptr; }
+
+ // Move conversion constructor.
+ template <typename U,
+ typename = typename std::enable_if<
+ std::is_convertible<U*, T*>::value>::type>
+ scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
+ r.ptr_ = nullptr;
+ }
+
+ ~scoped_refptr() {
+ static_assert(!base::subtle::IsRefCountPreferenceOverridden(
+ static_cast<T*>(nullptr), static_cast<T*>(nullptr)),
+ "It's unsafe to override the ref count preference."
+ " Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE"
+ " from subclasses.");
+ if (ptr_)
+ Release(ptr_);
+ }
+
+ T* get() const { return ptr_; }
+
+ T& operator*() const {
+ DCHECK(ptr_);
+ return *ptr_;
+ }
+
+ T* operator->() const {
+ DCHECK(ptr_);
+ return ptr_;
+ }
+
+ scoped_refptr& operator=(std::nullptr_t) {
+ reset();
+ return *this;
+ }
+
+ scoped_refptr& operator=(T* p) { return *this = scoped_refptr(p); }
+
+ // Unified assignment operator.
+ scoped_refptr& operator=(scoped_refptr r) noexcept {
+ swap(r);
+ return *this;
+ }
+
+ // Sets managed object to null and releases reference to the previous managed
+ // object, if it existed.
+ void reset() { scoped_refptr().swap(*this); }
+
+ void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
+
+ explicit operator bool() const { return ptr_ != nullptr; }
+
+ template <typename U>
+ bool operator==(const scoped_refptr<U>& rhs) const {
+ return ptr_ == rhs.get();
+ }
+
+ template <typename U>
+ bool operator!=(const scoped_refptr<U>& rhs) const {
+ return !operator==(rhs);
+ }
+
+ template <typename U>
+ bool operator<(const scoped_refptr<U>& rhs) const {
+ return ptr_ < rhs.get();
+ }
+
+ protected:
+ T* ptr_ = nullptr;
+
+ private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+ friend class ::base::SequencedTaskRunner;
+
+ // Friend access so these classes can use the constructor below as part of a
+ // binary size optimization.
+ friend class ::base::internal::BasePromise;
+ friend class ::base::WrappedPromise;
+
+ // Returns the owned pointer (if any), releasing ownership to the caller. The
+ // caller is responsible for managing the lifetime of the reference.
+ T* release();
+
+ scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
+
+ // Friend required for move constructors that set r.ptr_ to null.
+ template <typename U>
+ friend class scoped_refptr;
+
+ // Non-inline helpers to allow:
+ // class Opaque;
+ // extern template class scoped_refptr<Opaque>;
+ // Otherwise the compiler will complain that Opaque is an incomplete type.
+ static void AddRef(T* ptr);
+ static void Release(T* ptr);
+};
+
+template <typename T>
+T* scoped_refptr<T>::release() {
+ T* ptr = ptr_;
+ ptr_ = nullptr;
+ return ptr;
+}
+
+// static
+template <typename T>
+void scoped_refptr<T>::AddRef(T* ptr) {
+ ptr->AddRef();
+}
+
+// static
+template <typename T>
+void scoped_refptr<T>::Release(T* ptr) {
+ ptr->Release();
+}
+
+template <typename T, typename U>
+bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
+ return lhs.get() == rhs;
+}
+
+template <typename T, typename U>
+bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
+ return lhs == rhs.get();
+}
+
+template <typename T>
+bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+ return !static_cast<bool>(lhs);
+}
+
+template <typename T>
+bool operator==(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+ return !static_cast<bool>(rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T>
+bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+ return !operator==(lhs, null);
+}
+
+template <typename T>
+bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+ return !operator==(null, rhs);
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
+ return out << p.get();
+}
+
+template <typename T>
+void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) noexcept {
+ lhs.swap(rhs);
+}
+
+#endif // BASE_MEMORY_SCOPED_REFPTR_H_
diff --git a/security/sandbox/chromium/base/memory/shared_memory_mapping.cc b/security/sandbox/chromium/base/memory/shared_memory_mapping.cc
new file mode 100644
index 0000000000..8426fa8c21
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/shared_memory_mapping.cc
@@ -0,0 +1,115 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_mapping.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <sys/mman.h>
+#endif
+
+#if defined(OS_WIN)
+#include <aclapi.h>
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach_vm.h>
+#include "base/mac/mach_logging.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <lib/zx/vmar.h>
+#include "base/fuchsia/fuchsia_logging.h"
+#endif
+
+namespace base {
+
+SharedMemoryMapping::SharedMemoryMapping() = default;
+
+SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping) noexcept
+ : memory_(mapping.memory_),
+ size_(mapping.size_),
+ mapped_size_(mapping.mapped_size_),
+ guid_(mapping.guid_) {
+ mapping.memory_ = nullptr;
+}
+
+SharedMemoryMapping& SharedMemoryMapping::operator=(
+ SharedMemoryMapping&& mapping) noexcept {
+ Unmap();
+ memory_ = mapping.memory_;
+ size_ = mapping.size_;
+ mapped_size_ = mapping.mapped_size_;
+ guid_ = mapping.guid_;
+ mapping.memory_ = nullptr;
+ return *this;
+}
+
+SharedMemoryMapping::~SharedMemoryMapping() {
+ Unmap();
+}
+
+SharedMemoryMapping::SharedMemoryMapping(void* memory,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid)
+ : memory_(memory), size_(size), mapped_size_(mapped_size), guid_(guid) {
+ SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+}
+
+void SharedMemoryMapping::Unmap() {
+ if (!IsValid())
+ return;
+
+ SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+#if defined(OS_WIN)
+ if (!UnmapViewOfFile(memory_))
+ DPLOG(ERROR) << "UnmapViewOfFile";
+#elif defined(OS_FUCHSIA)
+ uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
+ zx_status_t status = zx::vmar::root_self()->unmap(addr, mapped_size_);
+ if (status != ZX_OK)
+ ZX_DLOG(ERROR, status) << "zx_vmar_unmap";
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ kern_return_t kr = mach_vm_deallocate(
+ mach_task_self(), reinterpret_cast<mach_vm_address_t>(memory_),
+ mapped_size_);
+ MACH_DLOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "mach_vm_deallocate";
+#else
+ if (munmap(memory_, mapped_size_) < 0)
+ DPLOG(ERROR) << "munmap";
+#endif
+}
+
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping() = default;
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
+ ReadOnlySharedMemoryMapping&&) noexcept = default;
+ReadOnlySharedMemoryMapping& ReadOnlySharedMemoryMapping::operator=(
+ ReadOnlySharedMemoryMapping&&) noexcept = default;
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
+ void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid)
+ : SharedMemoryMapping(address, size, mapped_size, guid) {}
+
+WritableSharedMemoryMapping::WritableSharedMemoryMapping() = default;
+WritableSharedMemoryMapping::WritableSharedMemoryMapping(
+ WritableSharedMemoryMapping&&) noexcept = default;
+WritableSharedMemoryMapping& WritableSharedMemoryMapping::operator=(
+ WritableSharedMemoryMapping&&) noexcept = default;
+WritableSharedMemoryMapping::WritableSharedMemoryMapping(
+ void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid)
+ : SharedMemoryMapping(address, size, mapped_size, guid) {}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/memory/shared_memory_mapping.h b/security/sandbox/chromium/base/memory/shared_memory_mapping.h
new file mode 100644
index 0000000000..2b8858e166
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/shared_memory_mapping.h
@@ -0,0 +1,252 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
+#define BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
+
+#include <cstddef>
+#include <type_traits>
+
+#include "base/containers/buffer_iterator.h"
+#include "base/containers/span.h"
+#include "base/macros.h"
+#include "base/unguessable_token.h"
+
+namespace base {
+
+namespace subtle {
+class PlatformSharedMemoryRegion;
+} // namespace subtle
+
+// Base class for scoped handles to a shared memory mapping created from a
+// shared memory region. Created shared memory mappings remain valid even if the
+// creator region is transferred or destroyed.
+//
+// Each mapping has an UnguessableToken that identifies the shared memory region
+// it was created from. This is used for memory metrics, to avoid overcounting
+// shared memory.
+class BASE_EXPORT SharedMemoryMapping {
+ public:
+ // Default constructor initializes an invalid instance.
+ SharedMemoryMapping();
+
+ // Move operations are allowed.
+ SharedMemoryMapping(SharedMemoryMapping&& mapping) noexcept;
+ SharedMemoryMapping& operator=(SharedMemoryMapping&& mapping) noexcept;
+
+ // Unmaps the region if the mapping is valid.
+ virtual ~SharedMemoryMapping();
+
+ // Returns true iff the mapping is valid. False means there is no
+ // corresponding area of memory.
+ bool IsValid() const { return memory_ != nullptr; }
+
+ // Returns the logical size of the mapping in bytes. This is precisely the
+ // size requested by whoever created the mapping, and it is always less than
+ // or equal to |mapped_size()|. This is undefined for invalid instances.
+ size_t size() const {
+ DCHECK(IsValid());
+ return size_;
+ }
+
+ // Returns the actual size of the mapping in bytes. This is always at least
+ // as large as |size()| but may be larger due to platform mapping alignment
+ // constraints. This is undefined for invalid instances.
+ size_t mapped_size() const {
+ DCHECK(IsValid());
+ return mapped_size_;
+ }
+
+  // Returns the 128-bit GUID of the region this mapping belongs to.
+ const UnguessableToken& guid() const {
+ DCHECK(IsValid());
+ return guid_;
+ }
+
+ protected:
+ SharedMemoryMapping(void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid);
+ void* raw_memory_ptr() const { return memory_; }
+
+ private:
+ friend class SharedMemoryTracker;
+
+ void Unmap();
+
+ void* memory_ = nullptr;
+ size_t size_ = 0;
+ size_t mapped_size_ = 0;
+ UnguessableToken guid_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedMemoryMapping);
+};
+
+// Class modeling a read-only mapping of a shared memory region into the
+// current process' address space. This is created by ReadOnlySharedMemoryRegion
+// instances.
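+//
+// A minimal usage sketch (assuming a valid ReadOnlySharedMemoryRegion
+// |region|; error handling elided):
+//
+//   ReadOnlySharedMemoryMapping mapping = region.Map();
+//   if (mapping.IsValid()) {
+//     span<const uint8_t> bytes = mapping.GetMemoryAsSpan<uint8_t>();
+//     // Use |bytes| for read-only access.
+//   }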
+class BASE_EXPORT ReadOnlySharedMemoryMapping : public SharedMemoryMapping {
+ public:
+ // Default constructor initializes an invalid instance.
+ ReadOnlySharedMemoryMapping();
+
+ // Move operations are allowed.
+ ReadOnlySharedMemoryMapping(ReadOnlySharedMemoryMapping&&) noexcept;
+ ReadOnlySharedMemoryMapping& operator=(
+ ReadOnlySharedMemoryMapping&&) noexcept;
+
+ // Returns the base address of the mapping. This is read-only memory. This is
+ // page-aligned. This is nullptr for invalid instances.
+ const void* memory() const { return raw_memory_ptr(); }
+
+ // Returns a pointer to a page-aligned const T if the mapping is valid and
+ // large enough to contain a T, or nullptr otherwise.
+ template <typename T>
+ const T* GetMemoryAs() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return nullptr;
+ if (sizeof(T) > size())
+ return nullptr;
+ return static_cast<const T*>(raw_memory_ptr());
+ }
+
+ // Returns a span of const T. The number of elements is autodeduced from the
+ // size of the shared memory mapping. The number of elements may be
+  // autodeduced as zero, i.e. if the mapping is invalid or isn't large enough
+  // to contain even one T; in that case, an empty span will be returned. The
+  // first element, if any, is guaranteed to be
+ // page-aligned.
+ template <typename T>
+ span<const T> GetMemoryAsSpan() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return span<const T>();
+ size_t count = size() / sizeof(T);
+ return GetMemoryAsSpan<T>(count);
+ }
+
+ // Returns a span of const T with |count| elements if the mapping is valid and
+ // large enough to contain |count| elements, or an empty span otherwise. The
+ // first element, if any, is guaranteed to be page-aligned.
+ template <typename T>
+ span<const T> GetMemoryAsSpan(size_t count) const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return span<const T>();
+ if (size() / sizeof(T) < count)
+ return span<const T>();
+ return span<const T>(static_cast<const T*>(raw_memory_ptr()), count);
+ }
+
+ // Returns a BufferIterator of const T.
+ template <typename T>
+ BufferIterator<const T> GetMemoryAsBufferIterator() const {
+ return BufferIterator<const T>(GetMemoryAsSpan<T>());
+ }
+
+ private:
+ friend class ReadOnlySharedMemoryRegion;
+ ReadOnlySharedMemoryMapping(void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid);
+
+ DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryMapping);
+};
+
+// Class modeling a writable mapping of a shared memory region into the
+// current process' address space. This is created by *SharedMemoryRegion
+// instances.
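+//
+// A minimal usage sketch (assuming a valid UnsafeSharedMemoryRegion |region|;
+// error handling elided):
+//
+//   WritableSharedMemoryMapping mapping = region.Map();
+//   if (auto* counter = mapping.GetMemoryAs<uint32_t>())
+//     *counter = 0;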
+class BASE_EXPORT WritableSharedMemoryMapping : public SharedMemoryMapping {
+ public:
+ // Default constructor initializes an invalid instance.
+ WritableSharedMemoryMapping();
+
+ // Move operations are allowed.
+ WritableSharedMemoryMapping(WritableSharedMemoryMapping&&) noexcept;
+ WritableSharedMemoryMapping& operator=(
+ WritableSharedMemoryMapping&&) noexcept;
+
+ // Returns the base address of the mapping. This is writable memory. This is
+ // page-aligned. This is nullptr for invalid instances.
+ void* memory() const { return raw_memory_ptr(); }
+
+ // Returns a pointer to a page-aligned T if the mapping is valid and large
+ // enough to contain a T, or nullptr otherwise.
+ template <typename T>
+ T* GetMemoryAs() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return nullptr;
+ if (sizeof(T) > size())
+ return nullptr;
+ return static_cast<T*>(raw_memory_ptr());
+ }
+
+ // Returns a span of T. The number of elements is autodeduced from the size of
+ // the shared memory mapping. The number of elements may be autodeduced as
+  // zero, i.e. if the mapping is invalid or isn't large enough to contain even
+  // one T; in that case, an empty span will be returned.
+ // The first element, if any, is guaranteed to be page-aligned.
+ template <typename T>
+ span<T> GetMemoryAsSpan() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return span<T>();
+ size_t count = size() / sizeof(T);
+ return GetMemoryAsSpan<T>(count);
+ }
+
+ // Returns a span of T with |count| elements if the mapping is valid and large
+ // enough to contain |count| elements, or an empty span otherwise. The first
+ // element, if any, is guaranteed to be page-aligned.
+ template <typename T>
+ span<T> GetMemoryAsSpan(size_t count) const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
+ if (!IsValid())
+ return span<T>();
+ if (size() / sizeof(T) < count)
+ return span<T>();
+ return span<T>(static_cast<T*>(raw_memory_ptr()), count);
+ }
+
+ // Returns a BufferIterator of T.
+ template <typename T>
+ BufferIterator<T> GetMemoryAsBufferIterator() {
+ return BufferIterator<T>(GetMemoryAsSpan<T>());
+ }
+
+ private:
+ friend WritableSharedMemoryMapping MapAtForTesting(
+ subtle::PlatformSharedMemoryRegion* region,
+ off_t offset,
+ size_t size);
+ friend class ReadOnlySharedMemoryRegion;
+ friend class WritableSharedMemoryRegion;
+ friend class UnsafeSharedMemoryRegion;
+ WritableSharedMemoryMapping(void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid);
+
+ DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryMapping);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
diff --git a/security/sandbox/chromium/base/memory/singleton.h b/security/sandbox/chromium/base/memory/singleton.h
new file mode 100644
index 0000000000..87b57919c0
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/singleton.h
@@ -0,0 +1,279 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// PLEASE READ: Do you really need a singleton? If possible, use a
+// function-local static of type base::NoDestructor<T> instead:
+//
+// Factory& Factory::GetInstance() {
+// static base::NoDestructor<Factory> instance;
+// return *instance;
+// }
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+//
+// Singletons make it hard to determine the lifetime of an object, which can
+// lead to buggy code and spurious crashes.
+//
+// Instead of adding another singleton into the mix, try to identify either:
+// a) An existing singleton that can manage your object's lifetime
+// b) Locations where you can deterministically create the object and pass
+// into other objects
+//
+// If you absolutely need a singleton, please keep them as trivial as possible
+// and ideally a leaf dependency. Singletons get problematic when they attempt
+// to do too much in their destructor or have circular dependencies.
+
+#ifndef BASE_MEMORY_SINGLETON_H_
+#define BASE_MEMORY_SINGLETON_H_
+
+#include "base/at_exit.h"
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/lazy_instance_helpers.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+// Default traits for Singleton<Type>. Calls operator new and operator delete on
+// the object. Registers automatic deletion at process exit.
+// Overload if you need arguments or another memory allocation function.
+template<typename Type>
+struct DefaultSingletonTraits {
+ // Allocates the object.
+ static Type* New() {
+    // The parentheses are important here; they force value-initialization,
+    // which zero-initializes POD types.
+ return new Type();
+ }
+
+ // Destroys the object.
+ static void Delete(Type* x) {
+ delete x;
+ }
+
+ // Set to true to automatically register deletion of the object on process
+ // exit. See below for the required call that makes this happen.
+ static const bool kRegisterAtExit = true;
+
+#if DCHECK_IS_ON()
+ // Set to false to disallow access on a non-joinable thread. This is
+ // different from kRegisterAtExit because StaticMemorySingletonTraits allows
+ // access on non-joinable threads, and gracefully handles this.
+ static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
+};
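+
+// For instance, a custom traits type might construct the instance with
+// arguments (MyWidget and MyWidgetTraits are hypothetical, shown only as a
+// sketch; it would be used as Singleton<MyWidget, MyWidgetTraits>::get()):
+//
+// struct MyWidgetTraits : public DefaultSingletonTraits<MyWidget> {
+//   static MyWidget* New() { return new MyWidget(/*verbose=*/true); }
+// };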
+
+
+// Alternate traits for use with the Singleton<Type>. Identical to
+// DefaultSingletonTraits except that the Singleton will not be cleaned up
+// at exit.
+template<typename Type>
+struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
+ static const bool kRegisterAtExit = false;
+#if DCHECK_IS_ON()
+ static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+};
+
+// Alternate traits for use with the Singleton<Type>. Allocates memory
+// for the singleton instance from a static buffer. The singleton will
+// be cleaned up at exit, but can't be revived after destruction unless
+// the ResurrectForTesting() method is called.
+//
+// This is useful for a certain category of things, notably logging and
+// tracing, where the singleton instance is of a type carefully constructed to
+// be safe to access post-destruction.
+// In logging and tracing you'll typically get stray calls at odd times, like
+// during static destruction, thread teardown and the like, and there's a
+// termination race on the heap-based singleton - e.g. if one thread calls
+// get(), but then another thread initiates AtExit processing, the first thread
+// may call into an object residing in unallocated memory. If the instance is
+// allocated from the data segment, then this is survivable.
+//
+// The destructor is to deallocate system resources, in this case to unregister
+// a callback the system will invoke when logging levels change. Note that
+// this is also used in e.g. Chrome Frame, where you have to allow for the
+// possibility of loading briefly into someone else's process space, and
+// so leaking is not an option, as that would sabotage the state of your host
+// process once you've unloaded.
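+//
+// For example, a type can wire these traits into Singleton<> as described
+// below (TraceLog is a hypothetical type):
+//
+//   TraceLog* TraceLog::GetInstance() {
+//     return Singleton<TraceLog,
+//                      StaticMemorySingletonTraits<TraceLog>>::get();
+//   }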
+template <typename Type>
+struct StaticMemorySingletonTraits {
+  // WARNING: Users must handle a New() that returns null.
+ static Type* New() {
+    // Only constructs once and returns a pointer; otherwise returns null.
+ if (subtle::NoBarrier_AtomicExchange(&dead_, 1))
+ return nullptr;
+
+ return new (buffer_) Type();
+ }
+
+ static void Delete(Type* p) {
+ if (p)
+ p->Type::~Type();
+ }
+
+ static const bool kRegisterAtExit = true;
+
+#if DCHECK_IS_ON()
+ static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+
+ static void ResurrectForTesting() { subtle::NoBarrier_Store(&dead_, 0); }
+
+ private:
+ alignas(Type) static char buffer_[sizeof(Type)];
+ // Signal the object was already deleted, so it is not revived.
+ static subtle::Atomic32 dead_;
+};
+
+template <typename Type>
+alignas(Type) char StaticMemorySingletonTraits<Type>::buffer_[sizeof(Type)];
+template <typename Type>
+subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
+
+// The Singleton<Type, Traits, DifferentiatingType> class manages a single
+// instance of Type which will be created on first use and will be destroyed at
+// normal process exit. The Traits::Delete function will not be called on
+// abnormal process exit.
+//
+// DifferentiatingType is used as a key to differentiate two different
+// singletons having the same memory allocation functions but serving a
+// different purpose. This is mainly used for Locks serving different purposes.
+//
+// Example usage:
+//
+// In your header:
+// namespace base {
+// template <typename T>
+// struct DefaultSingletonTraits;
+// }
+// class FooClass {
+// public:
+// static FooClass* GetInstance(); <-- See comment below on this.
+// void Bar() { ... }
+// private:
+// FooClass() { ... }
+// friend struct base::DefaultSingletonTraits<FooClass>;
+//
+// DISALLOW_COPY_AND_ASSIGN(FooClass);
+// };
+//
+// In your source file:
+// #include "base/memory/singleton.h"
+// FooClass* FooClass::GetInstance() {
+// return base::Singleton<FooClass>::get();
+// }
+//
+// Or for leaky singletons:
+// #include "base/memory/singleton.h"
+// FooClass* FooClass::GetInstance() {
+// return base::Singleton<
+// FooClass, base::LeakySingletonTraits<FooClass>>::get();
+// }
+//
+// And to call methods on FooClass:
+// FooClass::GetInstance()->Bar();
+//
+// NOTE: The method accessing Singleton<T>::get() has to be named as GetInstance
+// and it is important that FooClass::GetInstance() is not inlined in the
+// header. This makes sure that when source files from multiple targets include
+// this header they don't end up with different copies of the inlined code
+// creating multiple copies of the singleton.
+//
+// Singleton<> has no non-static members and doesn't need to actually be
+// instantiated.
+//
+// This class is itself thread-safe. The underlying Type must of course be
+// thread-safe if you want to use it concurrently. Two parameters may be tuned
+// depending on the user's requirements.
+//
+// Glossary:
+// RAE = kRegisterAtExit
+//
+// On every platform, if Traits::RAE is true, the singleton will be destroyed at
+// process exit. More precisely it uses AtExitManager which requires an
+// object of this type to be instantiated. AtExitManager mimics the semantics
+// of atexit(), such as LIFO order, but is safer to call under Windows. For more
+// information see at_exit.h.
+//
+// If Traits::RAE is false, the singleton will not be freed at process exit,
+// thus the singleton will be leaked if it is ever accessed. Traits::RAE
+// shouldn't be false unless absolutely necessary. Remember that the heap where
+// the object is allocated may be destroyed by the CRT anyway.
+//
+// Caveats:
+// (a) Every call to get(), operator->() and operator*() incurs some overhead
+// (16ns on my P4/2.8GHz) to check whether the object has already been
+// initialized. You may wish to cache the result of get(); it will not
+// change.
+//
+// (b) Your factory function must never throw an exception. This class is not
+// exception-safe.
+//
+
+template <typename Type,
+ typename Traits = DefaultSingletonTraits<Type>,
+ typename DifferentiatingType = Type>
+class Singleton {
+ private:
+ // A class T using the Singleton<T> pattern should declare a GetInstance()
+ // method and call Singleton::get() from within that. T may also declare a
+ // GetInstanceIfExists() method to invoke Singleton::GetIfExists().
+ friend Type;
+
+ // This class is safe to be constructed and copy-constructed since it has no
+  // members.
+
+ // Returns a pointer to the one true instance of the class.
+ static Type* get() {
+#if DCHECK_IS_ON()
+ if (!Traits::kAllowedToAccessOnNonjoinableThread)
+ ThreadRestrictions::AssertSingletonAllowed();
+#endif
+
+ return subtle::GetOrCreateLazyPointer(
+ &instance_, &CreatorFunc, nullptr,
+ Traits::kRegisterAtExit ? OnExit : nullptr, nullptr);
+ }
+
+ // Returns the same result as get() if the instance exists but doesn't
+ // construct it (and returns null) if it doesn't.
+ static Type* GetIfExists() {
+#if DCHECK_IS_ON()
+ if (!Traits::kAllowedToAccessOnNonjoinableThread)
+ ThreadRestrictions::AssertSingletonAllowed();
+#endif
+
+ if (!subtle::NoBarrier_Load(&instance_))
+ return nullptr;
+
+ // Need to invoke get() nonetheless as some Traits return null after
+ // destruction (even though |instance_| still holds garbage).
+ return get();
+ }
+
+ // Internal method used as an adaptor for GetOrCreateLazyPointer(). Do not use
+ // outside of that use case.
+  static Type* CreatorFunc(void* /* creator_arg */) { return Traits::New(); }
+
+  // Adapter function for use with AtExit(). This is expected to be called
+  // single-threaded, so no atomic operations are used.
+  // Calling OnExit while the singleton is in use by other threads is a
+  // mistake.
+ static void OnExit(void* /*unused*/) {
+    // AtExit should only ever be registered after the singleton instance was
+ // created. We should only ever get here with a valid instance_ pointer.
+ Traits::Delete(reinterpret_cast<Type*>(subtle::NoBarrier_Load(&instance_)));
+ instance_ = 0;
+ }
+ static subtle::AtomicWord instance_;
+};
+
+template <typename Type, typename Traits, typename DifferentiatingType>
+subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::instance_ = 0;
+
+} // namespace base
+
+#endif // BASE_MEMORY_SINGLETON_H_
diff --git a/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.cc b/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.cc
new file mode 100644
index 0000000000..92385d3e78
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.cc
@@ -0,0 +1,80 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/unsafe_shared_memory_region.h"
+
+#include <utility>
+
+namespace base {
+
+UnsafeSharedMemoryRegion::CreateFunction*
+ UnsafeSharedMemoryRegion::create_hook_ = nullptr;
+
+// static
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Create(size_t size) {
+ if (create_hook_)
+ return create_hook_(size);
+
+ subtle::PlatformSharedMemoryRegion handle =
+ subtle::PlatformSharedMemoryRegion::CreateUnsafe(size);
+
+ return UnsafeSharedMemoryRegion(std::move(handle));
+}
+
+// static
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Deserialize(
+ subtle::PlatformSharedMemoryRegion handle) {
+ return UnsafeSharedMemoryRegion(std::move(handle));
+}
+
+// static
+subtle::PlatformSharedMemoryRegion
+UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ UnsafeSharedMemoryRegion region) {
+ return std::move(region.handle_);
+}
+
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion() = default;
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
+ UnsafeSharedMemoryRegion&& region) = default;
+UnsafeSharedMemoryRegion& UnsafeSharedMemoryRegion::operator=(
+ UnsafeSharedMemoryRegion&& region) = default;
+UnsafeSharedMemoryRegion::~UnsafeSharedMemoryRegion() = default;
+
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Duplicate() const {
+ return UnsafeSharedMemoryRegion(handle_.Duplicate());
+}
+
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::Map() const {
+ return MapAt(0, handle_.GetSize());
+}
+
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::MapAt(off_t offset,
+ size_t size) const {
+ if (!IsValid())
+ return {};
+
+ void* memory = nullptr;
+ size_t mapped_size = 0;
+ if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+ return {};
+
+ return WritableSharedMemoryMapping(memory, size, mapped_size,
+ handle_.GetGUID());
+}
+
+bool UnsafeSharedMemoryRegion::IsValid() const {
+ return handle_.IsValid();
+}
+
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
+ subtle::PlatformSharedMemoryRegion handle)
+ : handle_(std::move(handle)) {
+ if (handle_.IsValid()) {
+ CHECK_EQ(handle_.GetMode(),
+ subtle::PlatformSharedMemoryRegion::Mode::kUnsafe);
+ }
+}
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.h b/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.h
new file mode 100644
index 0000000000..559d4c6830
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/unsafe_shared_memory_region.h
@@ -0,0 +1,127 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
+
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+
+// Scoped move-only handle to a region of platform shared memory. The instance
+// owns the platform handle it wraps. Mappings created by this region are
+// writable. These mappings remain valid even after the region handle is moved
+// or destroyed.
+//
+// NOTE: UnsafeSharedMemoryRegion cannot be converted to a read-only region. Use
+// with caution as the region will be writable to any process with a handle to
+// the region.
+//
+// Use this if and only if the following is true:
+// - You do not need to share the region as read-only, and,
+// - You need to have several instances of the region simultaneously, possibly
+// in different processes, that can produce writable mappings.
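+//
+// A typical round trip, sketched (error handling elided):
+//
+//   UnsafeSharedMemoryRegion region = UnsafeSharedMemoryRegion::Create(4096);
+//   WritableSharedMemoryMapping mapping = region.Map();
+//   if (mapping.IsValid())
+//     memset(mapping.memory(), 0, mapping.size());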
+
+class BASE_EXPORT UnsafeSharedMemoryRegion {
+ public:
+ using MappingType = WritableSharedMemoryMapping;
+ // Creates a new UnsafeSharedMemoryRegion instance of a given size that can be
+ // used for mapping writable shared memory into the virtual address space.
+ //
+ // This call will fail if the process does not have sufficient permissions to
+ // create a shared memory region itself. See
+ // mojo::CreateUnsafeSharedMemoryRegion in
+ // mojo/public/cpp/base/shared_memory_utils.h for creating a shared memory
+  // region from an unprivileged process, where a broker must be used.
+ static UnsafeSharedMemoryRegion Create(size_t size);
+ using CreateFunction = decltype(Create);
+
+ // Returns an UnsafeSharedMemoryRegion built from a platform-specific handle
+ // that was taken from another UnsafeSharedMemoryRegion instance. Returns an
+ // invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
+ // isn't unsafe.
+ // This should be used only by the code passing a handle across
+ // process boundaries.
+ static UnsafeSharedMemoryRegion Deserialize(
+ subtle::PlatformSharedMemoryRegion handle);
+
+ // Extracts a platform handle from the region. Ownership is transferred to the
+ // returned region object.
+ // This should be used only for sending the handle from the current
+ // process to another.
+ static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
+ UnsafeSharedMemoryRegion region);
+
+ // Default constructor initializes an invalid instance.
+ UnsafeSharedMemoryRegion();
+
+ // Move operations are allowed.
+ UnsafeSharedMemoryRegion(UnsafeSharedMemoryRegion&&);
+ UnsafeSharedMemoryRegion& operator=(UnsafeSharedMemoryRegion&&);
+
+ // Destructor closes shared memory region if valid.
+ // All created mappings will remain valid.
+ ~UnsafeSharedMemoryRegion();
+
+ // Duplicates the underlying platform handle and creates a new
+ // UnsafeSharedMemoryRegion instance that owns the newly created handle.
+ // Returns a valid UnsafeSharedMemoryRegion on success, invalid otherwise.
+ // The current region instance remains valid in any case.
+ UnsafeSharedMemoryRegion Duplicate() const;
+
+ // Maps the shared memory region into the caller's address space with write
+ // access. The mapped address is guaranteed to have an alignment of
+ // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
+ // Returns a valid WritableSharedMemoryMapping instance on success, invalid
+ // otherwise.
+ WritableSharedMemoryMapping Map() const;
+
+ // Same as above, but maps only |size| bytes of the shared memory region
+  // starting at the given |offset|. |offset| must be aligned to the value of
+  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if the
+  // requested bytes are outside the region limits.
+ WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
+
+ // Whether the underlying platform handle is valid.
+ bool IsValid() const;
+
+ // Returns the maximum mapping size that can be created from this region.
+ size_t GetSize() const {
+ DCHECK(IsValid());
+ return handle_.GetSize();
+ }
+
+  // Returns the 128-bit GUID of the region.
+ const UnguessableToken& GetGUID() const {
+ DCHECK(IsValid());
+ return handle_.GetGUID();
+ }
+
+ // Returns a platform shared memory handle. |this| remains the owner of the
+ // handle.
+ subtle::PlatformSharedMemoryRegion::PlatformHandle GetPlatformHandle() const {
+ DCHECK(IsValid());
+ return handle_.GetPlatformHandle();
+ }
+
+ private:
+ friend class SharedMemoryHooks;
+
+ explicit UnsafeSharedMemoryRegion(subtle::PlatformSharedMemoryRegion handle);
+
+ static void set_create_hook(CreateFunction* hook) { create_hook_ = hook; }
+
+ static CreateFunction* create_hook_;
+
+ subtle::PlatformSharedMemoryRegion handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(UnsafeSharedMemoryRegion);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
diff --git a/security/sandbox/chromium/base/memory/weak_ptr.h b/security/sandbox/chromium/base/memory/weak_ptr.h
new file mode 100644
index 0000000000..d274987168
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/weak_ptr.h
@@ -0,0 +1,395 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Weak pointers are pointers to an object that do not affect its lifetime,
+// and which may be invalidated (i.e. reset to nullptr) by the object, or its
+// owner, at any time, most commonly when the object is about to be deleted.
+
+// Weak pointers are useful when an object needs to be accessed safely by one
+// or more objects other than its owner, and those callers can cope with the
+// object vanishing and e.g. tasks posted to it being silently dropped.
+// Reference-counting such an object would complicate the ownership graph and
+// make it harder to reason about the object's lifetime.
+
+// EXAMPLE:
+//
+// class Controller {
+// public:
+// void SpawnWorker() { Worker::StartNew(weak_factory_.GetWeakPtr()); }
+// void WorkComplete(const Result& result) { ... }
+// private:
+// // Member variables should appear before the WeakPtrFactory, to ensure
+//   // that any WeakPtrs to Controller are invalidated before its member
+//   // variables' destructors are executed, rendering them invalid.
+// WeakPtrFactory<Controller> weak_factory_{this};
+// };
+//
+// class Worker {
+// public:
+// static void StartNew(const WeakPtr<Controller>& controller) {
+// Worker* worker = new Worker(controller);
+// // Kick off asynchronous processing...
+// }
+// private:
+// Worker(const WeakPtr<Controller>& controller)
+// : controller_(controller) {}
+// void DidCompleteAsynchronousProcessing(const Result& result) {
+// if (controller_)
+// controller_->WorkComplete(result);
+// }
+// WeakPtr<Controller> controller_;
+// };
+//
+// With this implementation a caller may use SpawnWorker() to dispatch multiple
+// Workers and subsequently delete the Controller, without waiting for all
+// Workers to have completed.
+
+// ------------------------- IMPORTANT: Thread-safety -------------------------
+
+// Weak pointers may be passed safely between sequences, but must always be
+// dereferenced and invalidated on the same SequencedTaskRunner, otherwise
+// checking the pointer would be racy.
+//
+// To ensure correct use, the first time a WeakPtr issued by a WeakPtrFactory
+// is dereferenced, the factory and its WeakPtrs become bound to the calling
+// sequence or current SequencedWorkerPool token, and cannot be dereferenced or
+// invalidated on any other task runner. Bound WeakPtrs can still be handed
+// off to other task runners, e.g. to post tasks back to the object on the
+// bound sequence.
+//
+// If all WeakPtr objects are destroyed or invalidated then the factory is
+// unbound from the SequencedTaskRunner/Thread. The WeakPtrFactory may then be
+// destroyed, or new WeakPtr objects may be used, from a different sequence.
+//
+// Thus, at least one WeakPtr object must exist, and have been dereferenced on
+// the correct sequence, for the remaining WeakPtr objects to enforce that they
+// are used on the desired sequence.
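+//
+// In practice a WeakPtr is most often bound into a posted task, so the task is
+// silently dropped if the object is gone by the time it runs. A sketch, where
+// |task_runner| and the BindOnce/PostTask plumbing are illustrative:
+//
+//   task_runner->PostTask(
+//       FROM_HERE, base::BindOnce(&Controller::WorkComplete,
+//                                 weak_factory_.GetWeakPtr(), result));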
+
+#ifndef BASE_MEMORY_WEAK_PTR_H_
+#define BASE_MEMORY_WEAK_PTR_H_
+
+#include <cstddef>
+#include <type_traits>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequence_checker.h"
+#include "base/synchronization/atomic_flag.h"
+
+namespace base {
+
+template <typename T> class SupportsWeakPtr;
+template <typename T> class WeakPtr;
+
+namespace internal {
+// These classes are part of the WeakPtr implementation.
+// DO NOT USE THESE CLASSES DIRECTLY YOURSELF.
+
+class BASE_EXPORT WeakReference {
+ public:
+ // Although Flag is bound to a specific SequencedTaskRunner, it may be
+ // deleted from another via base::WeakPtr::~WeakPtr().
+ class BASE_EXPORT Flag : public RefCountedThreadSafe<Flag> {
+ public:
+ Flag();
+
+ void Invalidate();
+ bool IsValid() const;
+
+ bool MaybeValid() const;
+
+ void DetachFromSequence();
+
+ private:
+ friend class base::RefCountedThreadSafe<Flag>;
+
+ ~Flag();
+
+ SEQUENCE_CHECKER(sequence_checker_);
+ AtomicFlag invalidated_;
+ };
+
+ WeakReference();
+ explicit WeakReference(const scoped_refptr<Flag>& flag);
+ ~WeakReference();
+
+ WeakReference(WeakReference&& other) noexcept;
+ WeakReference(const WeakReference& other);
+ WeakReference& operator=(WeakReference&& other) noexcept = default;
+ WeakReference& operator=(const WeakReference& other) = default;
+
+ bool IsValid() const;
+ bool MaybeValid() const;
+
+ private:
+ scoped_refptr<const Flag> flag_;
+};
+
+class BASE_EXPORT WeakReferenceOwner {
+ public:
+ WeakReferenceOwner();
+ ~WeakReferenceOwner();
+
+ WeakReference GetRef() const;
+
+ bool HasRefs() const { return !flag_->HasOneRef(); }
+
+ void Invalidate();
+
+ private:
+ scoped_refptr<WeakReference::Flag> flag_;
+};
+
+// This class simplifies the implementation of WeakPtr's type conversion
+// constructor by avoiding the need for a public accessor for ref_. A
+// WeakPtr<T> cannot access the private members of WeakPtr<U>, so this
+// base class gives us a way to access ref_ in a protected fashion.
+class BASE_EXPORT WeakPtrBase {
+ public:
+ WeakPtrBase();
+ ~WeakPtrBase();
+
+ WeakPtrBase(const WeakPtrBase& other) = default;
+ WeakPtrBase(WeakPtrBase&& other) noexcept = default;
+ WeakPtrBase& operator=(const WeakPtrBase& other) = default;
+ WeakPtrBase& operator=(WeakPtrBase&& other) noexcept = default;
+
+ void reset() {
+ ref_ = internal::WeakReference();
+ ptr_ = 0;
+ }
+
+ protected:
+ WeakPtrBase(const WeakReference& ref, uintptr_t ptr);
+
+ WeakReference ref_;
+
+ // This pointer is only valid when ref_.is_valid() is true. Otherwise, its
+ // value is undefined (as opposed to nullptr).
+ uintptr_t ptr_;
+};
+
+// This class provides a common implementation of functions that would
+// otherwise get instantiated separately for each distinct instantiation of
+// SupportsWeakPtr<>.
+class SupportsWeakPtrBase {
+ public:
+ // A safe static downcast of a WeakPtr<Base> to WeakPtr<Derived>. This
+  // conversion will only compile if there exists a Base which inherits
+ // from SupportsWeakPtr<Base>. See base::AsWeakPtr() below for a helper
+ // function that makes calling this easier.
+ //
+ // Precondition: t != nullptr
+ template<typename Derived>
+ static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
+ static_assert(
+ std::is_base_of<internal::SupportsWeakPtrBase, Derived>::value,
+ "AsWeakPtr argument must inherit from SupportsWeakPtr");
+ return AsWeakPtrImpl<Derived>(t);
+ }
+
+ private:
+ // This template function uses type inference to find a Base of Derived
+ // which is an instance of SupportsWeakPtr<Base>. We can then safely
+ // static_cast the Base* to a Derived*.
+ template <typename Derived, typename Base>
+ static WeakPtr<Derived> AsWeakPtrImpl(SupportsWeakPtr<Base>* t) {
+ WeakPtr<Base> ptr = t->AsWeakPtr();
+ return WeakPtr<Derived>(
+ ptr.ref_, static_cast<Derived*>(reinterpret_cast<Base*>(ptr.ptr_)));
+ }
+};
+
+} // namespace internal
+
+template <typename T> class WeakPtrFactory;
+
+// The WeakPtr class holds a weak reference to |T*|.
+//
+// This class is designed to be used like a normal pointer. You should always
+// null-test an object of this class before using it or invoking a method that
+// may result in the underlying object being destroyed.
+//
+// EXAMPLE:
+//
+// class Foo { ... };
+// WeakPtr<Foo> foo;
+// if (foo)
+// foo->method();
+//
+template <typename T>
+class WeakPtr : public internal::WeakPtrBase {
+ public:
+ WeakPtr() = default;
+ WeakPtr(std::nullptr_t) {}
+
+ // Allow conversion from U to T provided U "is a" T. Note that this
+ // is separate from the (implicit) copy and move constructors.
+ template <typename U>
+ WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other) {
+ // Need to cast from U* to T* to do pointer adjustment in case of multiple
+ // inheritance. This also enforces the "U is a T" rule.
+ T* t = reinterpret_cast<U*>(other.ptr_);
+ ptr_ = reinterpret_cast<uintptr_t>(t);
+ }
+ template <typename U>
+ WeakPtr(WeakPtr<U>&& other) noexcept : WeakPtrBase(std::move(other)) {
+ // Need to cast from U* to T* to do pointer adjustment in case of multiple
+ // inheritance. This also enforces the "U is a T" rule.
+ T* t = reinterpret_cast<U*>(other.ptr_);
+ ptr_ = reinterpret_cast<uintptr_t>(t);
+ }
+
+ T* get() const {
+ return ref_.IsValid() ? reinterpret_cast<T*>(ptr_) : nullptr;
+ }
+
+ T& operator*() const {
+ DCHECK(get() != nullptr);
+ return *get();
+ }
+ T* operator->() const {
+ DCHECK(get() != nullptr);
+ return get();
+ }
+
+ // Allow conditionals to test validity, e.g. if (weak_ptr) {...};
+ explicit operator bool() const { return get() != nullptr; }
+
+ // Returns false if the WeakPtr is confirmed to be invalid. This call is safe
+ // to make from any thread, e.g. to optimize away unnecessary work, but
+ // operator bool() must always be called, on the correct sequence, before
+ // actually using the pointer.
+ //
+ // Warning: as with any object, this call is only thread-safe if the WeakPtr
+ // instance isn't being re-assigned or reset() racily with this call.
+ bool MaybeValid() const { return ref_.MaybeValid(); }
+
+ // Returns whether the object |this| points to has been invalidated. This can
+ // be used to distinguish a WeakPtr to a destroyed object from one that has
+ // been explicitly set to null.
+ bool WasInvalidated() const { return ptr_ && !ref_.IsValid(); }
+
+ private:
+ friend class internal::SupportsWeakPtrBase;
+ template <typename U> friend class WeakPtr;
+ friend class SupportsWeakPtr<T>;
+ friend class WeakPtrFactory<T>;
+
+ WeakPtr(const internal::WeakReference& ref, T* ptr)
+ : WeakPtrBase(ref, reinterpret_cast<uintptr_t>(ptr)) {}
+};
+
+// Allow callers to compare WeakPtrs against nullptr to test validity.
+template <class T>
+bool operator!=(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+ return !(weak_ptr == nullptr);
+}
+template <class T>
+bool operator!=(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+ return weak_ptr != nullptr;
+}
+template <class T>
+bool operator==(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+ return weak_ptr.get() == nullptr;
+}
+template <class T>
+bool operator==(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+ return weak_ptr == nullptr;
+}
+
+namespace internal {
+class BASE_EXPORT WeakPtrFactoryBase {
+ protected:
+ WeakPtrFactoryBase(uintptr_t ptr);
+ ~WeakPtrFactoryBase();
+ internal::WeakReferenceOwner weak_reference_owner_;
+ uintptr_t ptr_;
+};
+} // namespace internal
+
+// A class may be composed of a WeakPtrFactory and thereby
+// control how it exposes weak pointers to itself. This is helpful if you only
+// need weak pointers within the implementation of a class. This class is also
+// useful when working with primitive types. For example, you could have a
+// WeakPtrFactory<bool> that is used to pass around a weak reference to a bool.
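+//
+// For example (a sketch; the bool must outlive the factory):
+//
+//   bool flag = false;
+//   WeakPtrFactory<bool> factory(&flag);
+//   WeakPtr<bool> weak_flag = factory.GetWeakPtr();
+//   if (weak_flag)
+//     *weak_flag = true;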
+template <class T>
+class WeakPtrFactory : public internal::WeakPtrFactoryBase {
+ public:
+ explicit WeakPtrFactory(T* ptr)
+ : WeakPtrFactoryBase(reinterpret_cast<uintptr_t>(ptr)) {}
+
+ ~WeakPtrFactory() = default;
+
+ WeakPtr<T> GetWeakPtr() {
+ return WeakPtr<T>(weak_reference_owner_.GetRef(),
+ reinterpret_cast<T*>(ptr_));
+ }
+
+ // Call this method to invalidate all existing weak pointers.
+ void InvalidateWeakPtrs() {
+ DCHECK(ptr_);
+ weak_reference_owner_.Invalidate();
+ }
+
+ // Call this method to determine if any weak pointers exist.
+ bool HasWeakPtrs() const {
+ DCHECK(ptr_);
+ return weak_reference_owner_.HasRefs();
+ }
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory);
+};
+
+// A class may extend from SupportsWeakPtr to let others take weak pointers to
+// it. This avoids the class itself implementing boilerplate to dispense weak
+// pointers. However, since SupportsWeakPtr's destructor won't invalidate
+// weak pointers to the class until after the derived class' members have been
+// destroyed, its use can lead to subtle use-after-destroy issues.
+template <class T>
+class SupportsWeakPtr : public internal::SupportsWeakPtrBase {
+ public:
+ SupportsWeakPtr() = default;
+
+ WeakPtr<T> AsWeakPtr() {
+ return WeakPtr<T>(weak_reference_owner_.GetRef(), static_cast<T*>(this));
+ }
+
+ protected:
+ ~SupportsWeakPtr() = default;
+
+ private:
+ internal::WeakReferenceOwner weak_reference_owner_;
+ DISALLOW_COPY_AND_ASSIGN(SupportsWeakPtr);
+};
+
+// Helper function that uses type deduction to safely return a WeakPtr<Derived>
+// when Derived doesn't directly extend SupportsWeakPtr<Derived>, but instead
+// extends a Base that extends SupportsWeakPtr<Base>.
+//
+// EXAMPLE:
+//   class Base : public base::SupportsWeakPtr<Base> {};
+// class Derived : public Base {};
+//
+// Derived derived;
+// base::WeakPtr<Derived> ptr = base::AsWeakPtr(&derived);
+//
+// Note that the following doesn't work (invalid type conversion) since
+// Derived::AsWeakPtr() is WeakPtr<Base> SupportsWeakPtr<Base>::AsWeakPtr(),
+// and there's no way to safely cast WeakPtr<Base> to WeakPtr<Derived> at
+// the caller.
+//
+// base::WeakPtr<Derived> ptr = derived.AsWeakPtr(); // Fails.
+
+template <typename Derived>
+WeakPtr<Derived> AsWeakPtr(Derived* t) {
+ return internal::SupportsWeakPtrBase::StaticAsWeakPtr<Derived>(t);
+}
+
+} // namespace base
+
+#endif // BASE_MEMORY_WEAK_PTR_H_