author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit    43a97878ce14b72f0981164f87f2e35e14151312
tree      620249daf56c0258faa40cbdcf9cfba06de2a846  /security/sandbox/chromium/base/containers
parent    Initial commit.
Adding upstream version 110.0.1. (upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'security/sandbox/chromium/base/containers')
-rw-r--r--  security/sandbox/chromium/base/containers/adapters.h          |   55
-rw-r--r--  security/sandbox/chromium/base/containers/buffer_iterator.h   |  145
-rw-r--r--  security/sandbox/chromium/base/containers/checked_iterators.h |  205
-rw-r--r--  security/sandbox/chromium/base/containers/circular_deque.h    | 1112
-rw-r--r--  security/sandbox/chromium/base/containers/span.h              |  530
-rw-r--r--  security/sandbox/chromium/base/containers/stack.h             |   23
-rw-r--r--  security/sandbox/chromium/base/containers/util.h              |   21
-rw-r--r--  security/sandbox/chromium/base/containers/vector_buffer.h     |  188
8 files changed, 2279 insertions(+), 0 deletions(-)
diff --git a/security/sandbox/chromium/base/containers/adapters.h b/security/sandbox/chromium/base/containers/adapters.h
new file mode 100644
index 0000000000..ec33481752
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/adapters.h
@@ -0,0 +1,55 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_ADAPTERS_H_
+#define BASE_CONTAINERS_ADAPTERS_H_
+
+#include <stddef.h>
+
+#include <iterator>
+#include <utility>
+
+#include "base/macros.h"
+
+namespace base {
+
+namespace internal {
+
+// Internal adapter class for implementing base::Reversed.
+template <typename T>
+class ReversedAdapter {
+ public:
+ using Iterator = decltype(std::rbegin(std::declval<T&>()));
+
+ explicit ReversedAdapter(T& t) : t_(t) {}
+ ReversedAdapter(const ReversedAdapter& ra) : t_(ra.t_) {}
+
+ Iterator begin() const { return std::rbegin(t_); }
+ Iterator end() const { return std::rend(t_); }
+
+ private:
+ T& t_;
+
+ DISALLOW_ASSIGN(ReversedAdapter);
+};
+
+} // namespace internal
+
+// Reversed returns a container adapter usable in a range-based "for" statement
+// for iterating a reversible container in reverse order.
+//
+// Example:
+//
+// std::vector<int> v = ...;
+// for (int i : base::Reversed(v)) {
+// // iterates through v from back to front
+// }
+template <typename T>
+internal::ReversedAdapter<T> Reversed(T& t) {
+ return internal::ReversedAdapter<T>(t);
+}
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_ADAPTERS_H_
diff --git a/security/sandbox/chromium/base/containers/buffer_iterator.h b/security/sandbox/chromium/base/containers/buffer_iterator.h
new file mode 100644
index 0000000000..a4fd670190
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/buffer_iterator.h
@@ -0,0 +1,145 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_BUFFER_ITERATOR_H_
+#define BASE_CONTAINERS_BUFFER_ITERATOR_H_
+
+#include <type_traits>
+
+#include "base/bit_cast.h"
+#include "base/containers/span.h"
+#include "base/numerics/checked_math.h"
+
+namespace base {
+
+// BufferIterator is a bounds-checked container utility to access variable-
+// length, heterogeneous structures contained within a buffer. If the data are
+// homogeneous, use base::span<> instead.
+//
+// After being created with a weakly-owned buffer, BufferIterator returns
+// pointers to structured data within the buffer. After each method call that
+// returns data in the buffer, the iterator position is advanced by the byte
+// size of the object (or span of objects) returned. If there are not enough
+// bytes remaining in the buffer to return the requested object(s), a nullptr
+// or empty span is returned.
+//
+// This class is similar to base::Pickle, which should be preferred for
+// serializing to disk. Pickle versions its header and does not support writing
+// structures, which are problematic for serialization due to struct padding and
+// version shear concerns.
+//
+// Example usage:
+//
+// std::vector<uint8_t> buffer(4096);
+// if (!ReadSomeData(&buffer, buffer.size())) {
+// LOG(ERROR) << "Failed to read data.";
+// return false;
+// }
+//
+// BufferIterator<uint8_t> iterator(buffer);
+// uint32_t* num_items = iterator.Object<uint32_t>();
+// if (!num_items) {
+// LOG(ERROR) << "No num_items field."
+// return false;
+// }
+//
+// base::span<const item_struct> items =
+// iterator.Span<item_struct>(*num_items);
+// if (items.size() != *num_items) {
+// LOG(ERROR) << "Not enough items.";
+// return false;
+// }
+//
+// // ... validate the objects in |items|.
+template <typename B>
+class BufferIterator {
+ public:
+ static_assert(std::is_same<std::remove_const_t<B>, char>::value ||
+ std::is_same<std::remove_const_t<B>, unsigned char>::value,
+ "Underlying buffer type must be char-type.");
+
+ BufferIterator() {}
+ BufferIterator(B* data, size_t size)
+ : BufferIterator(make_span(data, size)) {}
+ explicit BufferIterator(span<B> buffer)
+ : buffer_(buffer), remaining_(buffer) {}
+ ~BufferIterator() {}
+
+ // Returns a pointer to a mutable structure T in the buffer at the current
+ // position. On success, the iterator position is advanced by sizeof(T). If
+ // there are not sizeof(T) bytes remaining in the buffer, returns nullptr.
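+  //
+  // A usage sketch (assuming |buffer| is a writable std::vector<uint8_t>):
+  //
+  //   BufferIterator<uint8_t> writer(buffer);
+  //   uint32_t* num_items = writer.MutableObject<uint32_t>();
+  //   if (num_items)
+  //     *num_items = 42;  // Writes directly into the underlying buffer.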
+ template <typename T,
+ typename =
+ typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
+ T* MutableObject() {
+ size_t size = sizeof(T);
+ size_t next_position;
+ if (!CheckAdd(position(), size).AssignIfValid(&next_position))
+ return nullptr;
+ if (next_position > total_size())
+ return nullptr;
+ T* t = bit_cast<T*>(remaining_.data());
+ remaining_ = remaining_.subspan(size);
+ return t;
+ }
+
+ // Returns a const pointer to an object of type T in the buffer at the current
+ // position.
+ template <typename T,
+ typename =
+ typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
+ const T* Object() {
+ return MutableObject<const T>();
+ }
+
+ // Returns a span of |count| T objects in the buffer at the current position.
+ // On success, the iterator position is advanced by |sizeof(T) * count|. If
+ // there are not enough bytes remaining in the buffer to fulfill the request,
+ // returns an empty span.
+ template <typename T,
+ typename =
+ typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
+ span<T> MutableSpan(size_t count) {
+ size_t size;
+ if (!CheckMul(sizeof(T), count).AssignIfValid(&size))
+ return span<T>();
+ size_t next_position;
+ if (!CheckAdd(position(), size).AssignIfValid(&next_position))
+ return span<T>();
+ if (next_position > total_size())
+ return span<T>();
+ auto result = span<T>(bit_cast<T*>(remaining_.data()), count);
+ remaining_ = remaining_.subspan(size);
+ return result;
+ }
+
+  // Returns a span of |count| const objects of type T in the buffer at the
+ // current position.
+ template <typename T,
+ typename =
+ typename std::enable_if_t<std::is_trivially_copyable<T>::value>>
+ span<const T> Span(size_t count) {
+ return MutableSpan<const T>(count);
+ }
+
+ // Resets the iterator position to the absolute offset |to|.
+ void Seek(size_t to) { remaining_ = buffer_.subspan(to); }
+
+ // Returns the total size of the underlying buffer.
+ size_t total_size() { return buffer_.size(); }
+
+ // Returns the current position in the buffer.
+ size_t position() { return buffer_.size_bytes() - remaining_.size_bytes(); }
+
+ private:
+ // The original buffer that the iterator was constructed with.
+ const span<B> buffer_;
+ // A subspan of |buffer_| containing the remaining bytes to iterate over.
+ span<B> remaining_;
+ // Copy and assign allowed.
+};
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_BUFFER_ITERATOR_H_
diff --git a/security/sandbox/chromium/base/containers/checked_iterators.h b/security/sandbox/chromium/base/containers/checked_iterators.h
new file mode 100644
index 0000000000..cdfd2909d4
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/checked_iterators.h
@@ -0,0 +1,205 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_CHECKED_ITERATORS_H_
+#define BASE_CONTAINERS_CHECKED_ITERATORS_H_
+
+#include <iterator>
+#include <memory>
+#include <type_traits>
+
+#include "base/containers/util.h"
+#include "base/logging.h"
+
+namespace base {
+
+template <typename T>
+class CheckedContiguousIterator {
+ public:
+ using difference_type = std::ptrdiff_t;
+ using value_type = std::remove_cv_t<T>;
+ using pointer = T*;
+ using reference = T&;
+ using iterator_category = std::random_access_iterator_tag;
+
+ // Required for converting constructor below.
+ template <typename U>
+ friend class CheckedContiguousIterator;
+
+ constexpr CheckedContiguousIterator() = default;
+ constexpr CheckedContiguousIterator(T* start, const T* end)
+ : CheckedContiguousIterator(start, start, end) {}
+ constexpr CheckedContiguousIterator(const T* start, T* current, const T* end)
+ : start_(start), current_(current), end_(end) {
+ CHECK_LE(start, current);
+ CHECK_LE(current, end);
+ }
+ constexpr CheckedContiguousIterator(const CheckedContiguousIterator& other) =
+ default;
+
+ // Converting constructor allowing conversions like CCI<T> to CCI<const T>,
+ // but disallowing CCI<const T> to CCI<T> or CCI<Derived> to CCI<Base>, which
+ // are unsafe. Furthermore, this is the same condition as used by the
+ // converting constructors of std::span<T> and std::unique_ptr<T[]>.
+ // See https://wg21.link/n4042 for details.
+ template <
+ typename U,
+ std::enable_if_t<std::is_convertible<U (*)[], T (*)[]>::value>* = nullptr>
+ constexpr CheckedContiguousIterator(const CheckedContiguousIterator<U>& other)
+ : start_(other.start_), current_(other.current_), end_(other.end_) {
+ // We explicitly don't delegate to the 3-argument constructor here. Its
+ // CHECKs would be redundant, since we expect |other| to maintain its own
+ // invariant. However, DCHECKs never hurt anybody. Presumably.
+ DCHECK_LE(other.start_, other.current_);
+ DCHECK_LE(other.current_, other.end_);
+ }
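+
+  // For example, a sketch of the allowed and disallowed conversions:
+  //
+  //   int arr[] = {1, 2, 3};
+  //   CheckedContiguousIterator<int> it(arr, arr + 3);
+  //   CheckedContiguousIterator<const int> cit = it;  // OK: adds const.
+  //   // CheckedContiguousIterator<int> bad = cit;    // Drops const; does
+  //   //                                              // not compile.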
+
+ ~CheckedContiguousIterator() = default;
+
+ constexpr CheckedContiguousIterator& operator=(
+ const CheckedContiguousIterator& other) = default;
+
+ constexpr bool operator==(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ == other.current_;
+ }
+
+ constexpr bool operator!=(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ != other.current_;
+ }
+
+ constexpr bool operator<(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ < other.current_;
+ }
+
+ constexpr bool operator<=(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ <= other.current_;
+ }
+
+ constexpr bool operator>(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ > other.current_;
+ }
+
+ constexpr bool operator>=(const CheckedContiguousIterator& other) const {
+ CheckComparable(other);
+ return current_ >= other.current_;
+ }
+
+ constexpr CheckedContiguousIterator& operator++() {
+ CHECK_NE(current_, end_);
+ ++current_;
+ return *this;
+ }
+
+ constexpr CheckedContiguousIterator operator++(int) {
+ CheckedContiguousIterator old = *this;
+ ++*this;
+ return old;
+ }
+
+ constexpr CheckedContiguousIterator& operator--() {
+ CHECK_NE(current_, start_);
+ --current_;
+ return *this;
+ }
+
+ constexpr CheckedContiguousIterator operator--(int) {
+ CheckedContiguousIterator old = *this;
+ --*this;
+ return old;
+ }
+
+ constexpr CheckedContiguousIterator& operator+=(difference_type rhs) {
+ if (rhs > 0) {
+ CHECK_LE(rhs, end_ - current_);
+ } else {
+ CHECK_LE(-rhs, current_ - start_);
+ }
+ current_ += rhs;
+ return *this;
+ }
+
+ constexpr CheckedContiguousIterator operator+(difference_type rhs) const {
+ CheckedContiguousIterator it = *this;
+ it += rhs;
+ return it;
+ }
+
+ constexpr CheckedContiguousIterator& operator-=(difference_type rhs) {
+ if (rhs < 0) {
+ CHECK_LE(-rhs, end_ - current_);
+ } else {
+ CHECK_LE(rhs, current_ - start_);
+ }
+ current_ -= rhs;
+ return *this;
+ }
+
+ constexpr CheckedContiguousIterator operator-(difference_type rhs) const {
+ CheckedContiguousIterator it = *this;
+ it -= rhs;
+ return it;
+ }
+
+ constexpr friend difference_type operator-(
+ const CheckedContiguousIterator& lhs,
+ const CheckedContiguousIterator& rhs) {
+ CHECK_EQ(lhs.start_, rhs.start_);
+ CHECK_EQ(lhs.end_, rhs.end_);
+ return lhs.current_ - rhs.current_;
+ }
+
+ constexpr reference operator*() const {
+ CHECK_NE(current_, end_);
+ return *current_;
+ }
+
+ constexpr pointer operator->() const {
+ CHECK_NE(current_, end_);
+ return current_;
+ }
+
+ constexpr reference operator[](difference_type rhs) const {
+ CHECK_GE(rhs, 0);
+ CHECK_LT(rhs, end_ - current_);
+ return current_[rhs];
+ }
+
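+  // Returns whether the range [from_begin, from_end) can be moved to the
+  // destination beginning at |to| without the source and destination ranges
+  // overlapping in memory.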
+ static bool IsRangeMoveSafe(const CheckedContiguousIterator& from_begin,
+ const CheckedContiguousIterator& from_end,
+ const CheckedContiguousIterator& to)
+ WARN_UNUSED_RESULT {
+ if (from_end < from_begin)
+ return false;
+ const auto from_begin_uintptr = get_uintptr(from_begin.current_);
+ const auto from_end_uintptr = get_uintptr(from_end.current_);
+ const auto to_begin_uintptr = get_uintptr(to.current_);
+ const auto to_end_uintptr =
+ get_uintptr((to + std::distance(from_begin, from_end)).current_);
+
+ return to_begin_uintptr >= from_end_uintptr ||
+ to_end_uintptr <= from_begin_uintptr;
+ }
+
+ private:
+ constexpr void CheckComparable(const CheckedContiguousIterator& other) const {
+ CHECK_EQ(start_, other.start_);
+ CHECK_EQ(end_, other.end_);
+ }
+
+ const T* start_ = nullptr;
+ T* current_ = nullptr;
+ const T* end_ = nullptr;
+};
+
+template <typename T>
+using CheckedContiguousConstIterator = CheckedContiguousIterator<const T>;
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_CHECKED_ITERATORS_H_
diff --git a/security/sandbox/chromium/base/containers/circular_deque.h b/security/sandbox/chromium/base/containers/circular_deque.h
new file mode 100644
index 0000000000..0d452b56be
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/circular_deque.h
@@ -0,0 +1,1112 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_CIRCULAR_DEQUE_H_
+#define BASE_CONTAINERS_CIRCULAR_DEQUE_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+#include "base/containers/vector_buffer.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+#include "base/template_util.h"
+
+// base::circular_deque is similar to std::deque. Unlike std::deque, the
+// storage is provided in a flat circular buffer conceptually similar to a
+// vector. The beginning and end will wrap around as necessary so that
+// pushes and pops will be constant time as long as a capacity expansion is
+// not required.
+//
+// The API should be identical to std::deque with the following differences:
+//
+// - ITERATORS ARE NOT STABLE. Mutating the container will invalidate all
+// iterators.
+//
+// - Insertions may resize the vector and so are not constant time (std::deque
+// guarantees constant time for insertions at the ends).
+//
+// - Container-wide comparisons are not implemented. If you want to compare
+// two containers, use an algorithm so the expensive iteration is explicit.
+//
+// If you want a similar container with only a queue API, use base::queue in
+// base/containers/queue.h.
+//
+// Constructors:
+// circular_deque();
+// circular_deque(size_t count);
+// circular_deque(size_t count, const T& value);
+// circular_deque(InputIterator first, InputIterator last);
+// circular_deque(const circular_deque&);
+// circular_deque(circular_deque&&);
+// circular_deque(std::initializer_list<value_type>);
+//
+// Assignment functions:
+// circular_deque& operator=(const circular_deque&);
+// circular_deque& operator=(circular_deque&&);
+// circular_deque& operator=(std::initializer_list<T>);
+// void assign(size_t count, const T& value);
+// void assign(InputIterator first, InputIterator last);
+// void assign(std::initializer_list<T> value);
+//
+// Random accessors:
+// T& at(size_t);
+// const T& at(size_t) const;
+// T& operator[](size_t);
+// const T& operator[](size_t) const;
+//
+// End accessors:
+// T& front();
+// const T& front() const;
+// T& back();
+// const T& back() const;
+//
+// Iterator functions:
+// iterator begin();
+// const_iterator begin() const;
+// const_iterator cbegin() const;
+// iterator end();
+// const_iterator end() const;
+// const_iterator cend() const;
+// reverse_iterator rbegin();
+// const_reverse_iterator rbegin() const;
+// const_reverse_iterator crbegin() const;
+// reverse_iterator rend();
+// const_reverse_iterator rend() const;
+// const_reverse_iterator crend() const;
+//
+// Memory management:
+// void reserve(size_t); // SEE IMPLEMENTATION FOR SOME GOTCHAS.
+// size_t capacity() const;
+// void shrink_to_fit();
+//
+// Size management:
+// void clear();
+// bool empty() const;
+// size_t size() const;
+// void resize(size_t);
+// void resize(size_t count, const T& value);
+//
+// Positional insert and erase:
+// void insert(const_iterator pos, size_type count, const T& value);
+// void insert(const_iterator pos,
+// InputIterator first, InputIterator last);
+// iterator insert(const_iterator pos, const T& value);
+// iterator insert(const_iterator pos, T&& value);
+// iterator emplace(const_iterator pos, Args&&... args);
+// iterator erase(const_iterator pos);
+// iterator erase(const_iterator first, const_iterator last);
+//
+// End insert and erase:
+// void push_front(const T&);
+// void push_front(T&&);
+// void push_back(const T&);
+// void push_back(T&&);
+// T& emplace_front(Args&&...);
+// T& emplace_back(Args&&...);
+// void pop_front();
+// void pop_back();
+//
+// General:
+// void swap(circular_deque&);
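+//
+// Example (a minimal sketch):
+//
+//   base::circular_deque<int> queue;
+//   queue.push_back(1);
+//   queue.push_back(2);
+//   queue.push_front(0);         // queue is now {0, 1, 2}.
+//   int first = queue.front();   // first == 0.
+//   queue.pop_front();           // Invalidates all iterators into |queue|.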
+
+namespace base {
+
+template <class T>
+class circular_deque;
+
+namespace internal {
+
+// Start allocating nonempty buffers with this many entries. This is the
+// external capacity so the internal buffer will be one larger (= 4) which is
+// more even for the allocator. See the descriptions of internal vs. external
+// capacity in the comment above the buffer_ variable below.
+constexpr size_t kCircularBufferInitialCapacity = 3;
+
+template <typename T>
+class circular_deque_const_iterator {
+ public:
+ using difference_type = std::ptrdiff_t;
+ using value_type = T;
+ using pointer = const T*;
+ using reference = const T&;
+ using iterator_category = std::random_access_iterator_tag;
+
+ circular_deque_const_iterator() : parent_deque_(nullptr), index_(0) {
+#if DCHECK_IS_ON()
+ created_generation_ = 0;
+#endif // DCHECK_IS_ON()
+ }
+
+ // Dereferencing.
+ const T& operator*() const {
+ CheckUnstableUsage();
+ parent_deque_->CheckValidIndex(index_);
+ return parent_deque_->buffer_[index_];
+ }
+ const T* operator->() const {
+ CheckUnstableUsage();
+ parent_deque_->CheckValidIndex(index_);
+ return &parent_deque_->buffer_[index_];
+ }
+ const value_type& operator[](difference_type i) const { return *(*this + i); }
+
+ // Increment and decrement.
+ circular_deque_const_iterator& operator++() {
+ Increment();
+ return *this;
+ }
+ circular_deque_const_iterator operator++(int) {
+ circular_deque_const_iterator ret = *this;
+ Increment();
+ return ret;
+ }
+ circular_deque_const_iterator& operator--() {
+ Decrement();
+ return *this;
+ }
+ circular_deque_const_iterator operator--(int) {
+ circular_deque_const_iterator ret = *this;
+ Decrement();
+ return ret;
+ }
+
+ // Random access mutation.
+ friend circular_deque_const_iterator operator+(
+ const circular_deque_const_iterator& iter,
+ difference_type offset) {
+ circular_deque_const_iterator ret = iter;
+ ret.Add(offset);
+ return ret;
+ }
+ circular_deque_const_iterator& operator+=(difference_type offset) {
+ Add(offset);
+ return *this;
+ }
+ friend circular_deque_const_iterator operator-(
+ const circular_deque_const_iterator& iter,
+ difference_type offset) {
+ circular_deque_const_iterator ret = iter;
+ ret.Add(-offset);
+ return ret;
+ }
+ circular_deque_const_iterator& operator-=(difference_type offset) {
+ Add(-offset);
+ return *this;
+ }
+
+ friend std::ptrdiff_t operator-(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ lhs.CheckComparable(rhs);
+ return lhs.OffsetFromBegin() - rhs.OffsetFromBegin();
+ }
+
+ // Comparisons.
+ friend bool operator==(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ lhs.CheckComparable(rhs);
+ return lhs.index_ == rhs.index_;
+ }
+ friend bool operator!=(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ return !(lhs == rhs);
+ }
+ friend bool operator<(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ lhs.CheckComparable(rhs);
+ return lhs.OffsetFromBegin() < rhs.OffsetFromBegin();
+ }
+ friend bool operator<=(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ return !(lhs > rhs);
+ }
+ friend bool operator>(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ lhs.CheckComparable(rhs);
+ return lhs.OffsetFromBegin() > rhs.OffsetFromBegin();
+ }
+ friend bool operator>=(const circular_deque_const_iterator& lhs,
+ const circular_deque_const_iterator& rhs) {
+ return !(lhs < rhs);
+ }
+
+ protected:
+ friend class circular_deque<T>;
+
+ circular_deque_const_iterator(const circular_deque<T>* parent, size_t index)
+ : parent_deque_(parent), index_(index) {
+#if DCHECK_IS_ON()
+ created_generation_ = parent->generation_;
+#endif // DCHECK_IS_ON()
+ }
+
+ // Returns the offset from the beginning index of the buffer to the current
+ // item.
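+  //
+  // For example, with buffer capacity 8, begin_ == 6 and index_ == 1 the
+  // wrapped-around offset is 8 - 6 + 1 = 3.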
+ size_t OffsetFromBegin() const {
+ if (index_ >= parent_deque_->begin_)
+ return index_ - parent_deque_->begin_; // On the same side as begin.
+ return parent_deque_->buffer_.capacity() - parent_deque_->begin_ + index_;
+ }
+
+ // Most uses will be ++ and -- so use a simplified implementation.
+ void Increment() {
+ CheckUnstableUsage();
+ parent_deque_->CheckValidIndex(index_);
+ index_++;
+ if (index_ == parent_deque_->buffer_.capacity())
+ index_ = 0;
+ }
+ void Decrement() {
+ CheckUnstableUsage();
+ parent_deque_->CheckValidIndexOrEnd(index_);
+ if (index_ == 0)
+ index_ = parent_deque_->buffer_.capacity() - 1;
+ else
+ index_--;
+ }
+ void Add(difference_type delta) {
+ CheckUnstableUsage();
+#if DCHECK_IS_ON()
+ if (delta <= 0)
+ parent_deque_->CheckValidIndexOrEnd(index_);
+ else
+ parent_deque_->CheckValidIndex(index_);
+#endif
+ // It should be valid to add 0 to any iterator, even if the container is
+ // empty and the iterator points to end(). The modulo below will divide
+    // by 0 if the buffer capacity is 0, so it's important to check for
+ // this case explicitly.
+ if (delta == 0)
+ return;
+
+ difference_type new_offset = OffsetFromBegin() + delta;
+ DCHECK(new_offset >= 0 &&
+ new_offset <= static_cast<difference_type>(parent_deque_->size()));
+ index_ = (new_offset + parent_deque_->begin_) %
+ parent_deque_->buffer_.capacity();
+ }
+
+#if DCHECK_IS_ON()
+ void CheckUnstableUsage() const {
+ DCHECK(parent_deque_);
+ // Since circular_deque doesn't guarantee stability, any attempt to
+ // dereference this iterator after a mutation (i.e. the generation doesn't
+ // match the original) in the container is illegal.
+ DCHECK_EQ(created_generation_, parent_deque_->generation_)
+ << "circular_deque iterator dereferenced after mutation.";
+ }
+ void CheckComparable(const circular_deque_const_iterator& other) const {
+ DCHECK_EQ(parent_deque_, other.parent_deque_);
+ // Since circular_deque doesn't guarantee stability, two iterators that
+ // are compared must have been generated without mutating the container.
+ // If this fires, the container was mutated between generating the two
+ // iterators being compared.
+ DCHECK_EQ(created_generation_, other.created_generation_);
+ }
+#else
+ inline void CheckUnstableUsage() const {}
+ inline void CheckComparable(const circular_deque_const_iterator&) const {}
+#endif // DCHECK_IS_ON()
+
+ const circular_deque<T>* parent_deque_;
+ size_t index_;
+
+#if DCHECK_IS_ON()
+ // The generation of the parent deque when this iterator was created. The
+ // container will update the generation for every modification so we can
+ // test if the container was modified by comparing them.
+ uint64_t created_generation_;
+#endif // DCHECK_IS_ON()
+};
+
+template <typename T>
+class circular_deque_iterator : public circular_deque_const_iterator<T> {
+ using base = circular_deque_const_iterator<T>;
+
+ public:
+ friend class circular_deque<T>;
+
+ using difference_type = std::ptrdiff_t;
+ using value_type = T;
+ using pointer = T*;
+ using reference = T&;
+ using iterator_category = std::random_access_iterator_tag;
+
+ // Expose the base class' constructor.
+ circular_deque_iterator() : circular_deque_const_iterator<T>() {}
+
+ // Dereferencing.
+ T& operator*() const { return const_cast<T&>(base::operator*()); }
+ T* operator->() const { return const_cast<T*>(base::operator->()); }
+ T& operator[](difference_type i) {
+ return const_cast<T&>(base::operator[](i));
+ }
+
+ // Random access mutation.
+ friend circular_deque_iterator operator+(const circular_deque_iterator& iter,
+ difference_type offset) {
+ circular_deque_iterator ret = iter;
+ ret.Add(offset);
+ return ret;
+ }
+ circular_deque_iterator& operator+=(difference_type offset) {
+ base::Add(offset);
+ return *this;
+ }
+ friend circular_deque_iterator operator-(const circular_deque_iterator& iter,
+ difference_type offset) {
+ circular_deque_iterator ret = iter;
+ ret.Add(-offset);
+ return ret;
+ }
+ circular_deque_iterator& operator-=(difference_type offset) {
+ base::Add(-offset);
+ return *this;
+ }
+
+ // Increment and decrement.
+ circular_deque_iterator& operator++() {
+ base::Increment();
+ return *this;
+ }
+ circular_deque_iterator operator++(int) {
+ circular_deque_iterator ret = *this;
+ base::Increment();
+ return ret;
+ }
+ circular_deque_iterator& operator--() {
+ base::Decrement();
+ return *this;
+ }
+ circular_deque_iterator operator--(int) {
+ circular_deque_iterator ret = *this;
+ base::Decrement();
+ return ret;
+ }
+
+ private:
+ circular_deque_iterator(const circular_deque<T>* parent, size_t index)
+ : circular_deque_const_iterator<T>(parent, index) {}
+};
+
+} // namespace internal
+
+template <typename T>
+class circular_deque {
+ private:
+ using VectorBuffer = internal::VectorBuffer<T>;
+
+ public:
+ using value_type = T;
+ using size_type = std::size_t;
+ using difference_type = std::ptrdiff_t;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using pointer = value_type*;
+ using const_pointer = const value_type*;
+
+ using iterator = internal::circular_deque_iterator<T>;
+ using const_iterator = internal::circular_deque_const_iterator<T>;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ // ---------------------------------------------------------------------------
+ // Constructor
+
+ constexpr circular_deque() = default;
+
+  // Constructs with |count| default-constructed elements, or with |count|
+  // copies of |value|.
+ circular_deque(size_type count) { resize(count); }
+ circular_deque(size_type count, const T& value) { resize(count, value); }
+
+ // Range constructor.
+ template <class InputIterator>
+ circular_deque(InputIterator first, InputIterator last) {
+ assign(first, last);
+ }
+
+ // Copy/move.
+ circular_deque(const circular_deque& other) : buffer_(other.size() + 1) {
+ assign(other.begin(), other.end());
+ }
+ circular_deque(circular_deque&& other) noexcept
+ : buffer_(std::move(other.buffer_)),
+ begin_(other.begin_),
+ end_(other.end_) {
+ other.begin_ = 0;
+ other.end_ = 0;
+ }
+
+ circular_deque(std::initializer_list<value_type> init) { assign(init); }
+
+ ~circular_deque() { DestructRange(begin_, end_); }
+
+ // ---------------------------------------------------------------------------
+ // Assignments.
+ //
+ // All of these may invalidate iterators and references.
+
+ circular_deque& operator=(const circular_deque& other) {
+ if (&other == this)
+ return *this;
+
+ reserve(other.size());
+ assign(other.begin(), other.end());
+ return *this;
+ }
+ circular_deque& operator=(circular_deque&& other) noexcept {
+ if (&other == this)
+ return *this;
+
+ // We're about to overwrite the buffer, so don't free it in clear to
+ // avoid doing it twice.
+ ClearRetainCapacity();
+ buffer_ = std::move(other.buffer_);
+ begin_ = other.begin_;
+ end_ = other.end_;
+
+ other.begin_ = 0;
+ other.end_ = 0;
+
+ IncrementGeneration();
+ return *this;
+ }
+ circular_deque& operator=(std::initializer_list<value_type> ilist) {
+ reserve(ilist.size());
+ assign(std::begin(ilist), std::end(ilist));
+ return *this;
+ }
+
+ void assign(size_type count, const value_type& value) {
+ ClearRetainCapacity();
+ reserve(count);
+ for (size_t i = 0; i < count; i++)
+ emplace_back(value);
+ IncrementGeneration();
+ }
+
+ // This variant should be enabled only when InputIterator is an iterator.
+ template <typename InputIterator>
+ typename std::enable_if<::base::internal::is_iterator<InputIterator>::value,
+ void>::type
+ assign(InputIterator first, InputIterator last) {
+    // Possible future enhancement: dispatch on iterator tag type. For forward
+    // iterators we can use std::distance to preallocate the space required
+    // and only do one copy.
+ ClearRetainCapacity();
+ for (; first != last; ++first)
+ emplace_back(*first);
+ IncrementGeneration();
+ }
+
+ void assign(std::initializer_list<value_type> value) {
+ reserve(std::distance(value.begin(), value.end()));
+ assign(value.begin(), value.end());
+ }
+
+ // ---------------------------------------------------------------------------
+ // Accessors.
+ //
+ // Since this class assumes no exceptions, at() and operator[] are equivalent.
+
+ const value_type& at(size_type i) const {
+ DCHECK(i < size());
+ size_t right_size = buffer_.capacity() - begin_;
+ if (begin_ <= end_ || i < right_size)
+ return buffer_[begin_ + i];
+ return buffer_[i - right_size];
+ }
+ value_type& at(size_type i) {
+ return const_cast<value_type&>(as_const(*this).at(i));
+ }
+
+ value_type& operator[](size_type i) {
+ return const_cast<value_type&>(as_const(*this)[i]);
+ }
+
+ const value_type& operator[](size_type i) const { return at(i); }
+
+ value_type& front() {
+ DCHECK(!empty());
+ return buffer_[begin_];
+ }
+ const value_type& front() const {
+ DCHECK(!empty());
+ return buffer_[begin_];
+ }
+
+ value_type& back() {
+ DCHECK(!empty());
+ return *(--end());
+ }
+ const value_type& back() const {
+ DCHECK(!empty());
+ return *(--end());
+ }
+
+ // ---------------------------------------------------------------------------
+ // Iterators.
+
+ iterator begin() { return iterator(this, begin_); }
+ const_iterator begin() const { return const_iterator(this, begin_); }
+ const_iterator cbegin() const { return const_iterator(this, begin_); }
+
+ iterator end() { return iterator(this, end_); }
+ const_iterator end() const { return const_iterator(this, end_); }
+ const_iterator cend() const { return const_iterator(this, end_); }
+
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
+ }
+ const_reverse_iterator crbegin() const { return rbegin(); }
+
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+ const_reverse_iterator crend() const { return rend(); }
+
+ // ---------------------------------------------------------------------------
+ // Memory management.
+
+ // IMPORTANT NOTE ON reserve(...): This class implements auto-shrinking of
+ // the buffer when elements are deleted and there is "too much" wasted space.
+ // So if you call reserve() with a large size in anticipation of pushing many
+ // elements, but pop an element before the queue is full, the capacity you
+ // reserved may be lost.
+ //
+ // As a result, it's only worthwhile to call reserve() when you're adding
+ // many things at once with no intermediate operations.
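+  //
+  // For example (sketch):
+  //
+  //   base::circular_deque<int> q;
+  //   q.reserve(100);
+  //   q.push_back(1);
+  //   q.pop_back();  // The auto-shrink may discard the reserved capacity.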
+ void reserve(size_type new_capacity) {
+ if (new_capacity > capacity())
+ SetCapacityTo(new_capacity);
+ }
+
+ size_type capacity() const {
+ // One item is wasted to indicate end().
+ return buffer_.capacity() == 0 ? 0 : buffer_.capacity() - 1;
+ }
+
+ void shrink_to_fit() {
+ if (empty()) {
+ // Optimize empty case to really delete everything if there was
+ // something.
+ if (buffer_.capacity())
+ buffer_ = VectorBuffer();
+ } else {
+ SetCapacityTo(size());
+ }
+ }
+
+ // ---------------------------------------------------------------------------
+ // Size management.
+
+ // This will additionally reset the capacity() to 0.
+ void clear() {
+ // This can't resize(0) because that requires a default constructor to
+ // compile, which not all contained classes may implement.
+ ClearRetainCapacity();
+ buffer_ = VectorBuffer();
+ }
+
+ bool empty() const { return begin_ == end_; }
+
+ size_type size() const {
+ if (begin_ <= end_)
+ return end_ - begin_;
+ return buffer_.capacity() - begin_ + end_;
+ }
+
+ // When reducing size, the elements are deleted from the end. When expanding
+ // size, elements are added to the end with |value| or the default
+ // constructed version. Even when using resize(count) to shrink, a default
+ // constructor is required for the code to compile, even though it will not
+ // be called.
+ //
+ // There are two versions rather than using a default value to avoid
+  // creating a temporary when shrinking (when it's not needed). Plus, if the
+  // default constructor is desired when expanding, just calling it for each
+  // element is usually faster than making a default-constructed temporary
+  // and copying it.
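+  //
+  // For example (sketch):
+  //
+  //   base::circular_deque<int> d(2);  // d is {0, 0}.
+  //   d.resize(4, 9);                  // d is {0, 0, 9, 9}.
+  //   d.resize(1);                     // d is {0}.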
+ void resize(size_type count) {
+ // SEE BELOW VERSION if you change this. The code is mostly the same.
+ if (count > size()) {
+    // This could be slightly more efficient but expanding a queue with
+ // identical elements is unusual and the extra computations of emplacing
+ // one-by-one will typically be small relative to calling the constructor
+ // for every item.
+ ExpandCapacityIfNecessary(count - size());
+ while (size() < count)
+ emplace_back();
+ } else if (count < size()) {
+ size_t new_end = (begin_ + count) % buffer_.capacity();
+ DestructRange(new_end, end_);
+ end_ = new_end;
+
+ ShrinkCapacityIfNecessary();
+ }
+ IncrementGeneration();
+ }
+ void resize(size_type count, const value_type& value) {
+ // SEE ABOVE VERSION if you change this. The code is mostly the same.
+ if (count > size()) {
+ ExpandCapacityIfNecessary(count - size());
+ while (size() < count)
+ emplace_back(value);
+ } else if (count < size()) {
+ size_t new_end = (begin_ + count) % buffer_.capacity();
+ DestructRange(new_end, end_);
+ end_ = new_end;
+
+ ShrinkCapacityIfNecessary();
+ }
+ IncrementGeneration();
+ }
+
+ // ---------------------------------------------------------------------------
+ // Insert and erase.
+ //
+ // Insertion and deletion in the middle is O(n) and invalidates all existing
+ // iterators.
+ //
+ // The implementation of insert isn't optimized as much as it could be. If
+ // the insertion requires that the buffer be grown, it will first be grown
+ // and everything moved, and then the items will be inserted, potentially
+  // moving some items twice. This simplifies the implementation substantially
+ // and means less generated templatized code. Since this is an uncommon
+ // operation for deques, and already relatively slow, it doesn't seem worth
+ // the benefit to optimize this.
+
+ void insert(const_iterator pos, size_type count, const T& value) {
+ ValidateIterator(pos);
+
+ // Optimize insert at the beginning.
+ if (pos == begin()) {
+ ExpandCapacityIfNecessary(count);
+ for (size_t i = 0; i < count; i++)
+ push_front(value);
+ return;
+ }
+
+ iterator insert_cur(this, pos.index_);
+ iterator insert_end;
+ MakeRoomFor(count, &insert_cur, &insert_end);
+ while (insert_cur < insert_end) {
+ new (&buffer_[insert_cur.index_]) T(value);
+ ++insert_cur;
+ }
+
+ IncrementGeneration();
+ }
+
+ // This enable_if keeps this call from getting confused with the (pos, count,
+ // value) version when value is an integer.
+ template <class InputIterator>
+ typename std::enable_if<::base::internal::is_iterator<InputIterator>::value,
+ void>::type
+ insert(const_iterator pos, InputIterator first, InputIterator last) {
+ ValidateIterator(pos);
+
+ size_t inserted_items = std::distance(first, last);
+ if (inserted_items == 0)
+ return; // Can divide by 0 when doing modulo below, so return early.
+
+ // Make a hole to copy the items into.
+ iterator insert_cur;
+ iterator insert_end;
+ if (pos == begin()) {
+ // Optimize insert at the beginning, nothing needs to be shifted and the
+ // hole is the |inserted_items| block immediately before |begin_|.
+ ExpandCapacityIfNecessary(inserted_items);
+ insert_end = begin();
+ begin_ =
+ (begin_ + buffer_.capacity() - inserted_items) % buffer_.capacity();
+ insert_cur = begin();
+ } else {
+ insert_cur = iterator(this, pos.index_);
+ MakeRoomFor(inserted_items, &insert_cur, &insert_end);
+ }
+
+ // Copy the items.
+ while (insert_cur < insert_end) {
+ new (&buffer_[insert_cur.index_]) T(*first);
+ ++insert_cur;
+ ++first;
+ }
+
+ IncrementGeneration();
+ }
+
+ // These all return an iterator to the inserted item. Existing iterators will
+ // be invalidated.
+ iterator insert(const_iterator pos, const T& value) {
+ return emplace(pos, value);
+ }
+ iterator insert(const_iterator pos, T&& value) {
+ return emplace(pos, std::move(value));
+ }
+ template <class... Args>
+ iterator emplace(const_iterator pos, Args&&... args) {
+ ValidateIterator(pos);
+
+ // Optimize insert at beginning which doesn't require shifting.
+ if (pos == cbegin()) {
+ emplace_front(std::forward<Args>(args)...);
+ return begin();
+ }
+
+ // Do this before we make the new iterators we return.
+ IncrementGeneration();
+
+ iterator insert_begin(this, pos.index_);
+ iterator insert_end;
+ MakeRoomFor(1, &insert_begin, &insert_end);
+ new (&buffer_[insert_begin.index_]) T(std::forward<Args>(args)...);
+
+ return insert_begin;
+ }
+
+ // Calling erase() won't automatically resize the buffer smaller like resize
+ // or the pop functions. Erase is slow and relatively uncommon, and for
+ // normal deque usage a pop will normally be done on a regular basis that
+ // will prevent excessive buffer usage over long periods of time. It's not
+ // worth having the extra code for every template instantiation of erase()
+ // to resize capacity downward to a new buffer.
+ iterator erase(const_iterator pos) { return erase(pos, pos + 1); }
+ iterator erase(const_iterator first, const_iterator last) {
+ ValidateIterator(first);
+ ValidateIterator(last);
+
+ IncrementGeneration();
+
+ // First, call the destructor on the deleted items.
+ if (first.index_ == last.index_) {
+ // Nothing deleted. Need to return early to avoid falling through to
+ // moving items on top of themselves.
+ return iterator(this, first.index_);
+ } else if (first.index_ < last.index_) {
+ // Contiguous range.
+ buffer_.DestructRange(&buffer_[first.index_], &buffer_[last.index_]);
+ } else {
+ // Deleted range wraps around.
+ buffer_.DestructRange(&buffer_[first.index_],
+ &buffer_[buffer_.capacity()]);
+ buffer_.DestructRange(&buffer_[0], &buffer_[last.index_]);
+ }
+
+ if (first.index_ == begin_) {
+ // This deletion is from the beginning. Nothing needs to be copied, only
+ // begin_ needs to be updated.
+ begin_ = last.index_;
+ return iterator(this, last.index_);
+ }
+
+ // In an erase operation, the shifted items all move logically to the left,
+ // so move them from left-to-right.
+ iterator move_src(this, last.index_);
+ iterator move_src_end = end();
+ iterator move_dest(this, first.index_);
+ for (; move_src < move_src_end; move_src++, move_dest++) {
+ buffer_.MoveRange(&buffer_[move_src.index_],
+ &buffer_[move_src.index_ + 1],
+ &buffer_[move_dest.index_]);
+ }
+
+ end_ = move_dest.index_;
+
+ // Since we did not reallocate and only changed things after the erase
+ // element(s), the input iterator's index points to the thing following the
+ // deletion.
+ return iterator(this, first.index_);
+ }
+
+ // ---------------------------------------------------------------------------
+ // Begin/end operations.
+
+ void push_front(const T& value) { emplace_front(value); }
+ void push_front(T&& value) { emplace_front(std::move(value)); }
+
+ void push_back(const T& value) { emplace_back(value); }
+ void push_back(T&& value) { emplace_back(std::move(value)); }
+
+ template <class... Args>
+ reference emplace_front(Args&&... args) {
+ ExpandCapacityIfNecessary(1);
+ if (begin_ == 0)
+ begin_ = buffer_.capacity() - 1;
+ else
+ begin_--;
+ IncrementGeneration();
+ new (&buffer_[begin_]) T(std::forward<Args>(args)...);
+ return front();
+ }
+
+ template <class... Args>
+ reference emplace_back(Args&&... args) {
+ ExpandCapacityIfNecessary(1);
+ new (&buffer_[end_]) T(std::forward<Args>(args)...);
+ if (end_ == buffer_.capacity() - 1)
+ end_ = 0;
+ else
+ end_++;
+ IncrementGeneration();
+ return back();
+ }
+
+ void pop_front() {
+ DCHECK(size());
+ buffer_.DestructRange(&buffer_[begin_], &buffer_[begin_ + 1]);
+ begin_++;
+ if (begin_ == buffer_.capacity())
+ begin_ = 0;
+
+ ShrinkCapacityIfNecessary();
+
+ // Technically popping will not invalidate any iterators since the
+ // underlying buffer will be stable. But in the future we may want to add a
+ // feature that resizes the buffer smaller if there is too much wasted
+ // space. This ensures we can make such a change safely.
+ IncrementGeneration();
+ }
+ void pop_back() {
+ DCHECK(size());
+ if (end_ == 0)
+ end_ = buffer_.capacity() - 1;
+ else
+ end_--;
+ buffer_.DestructRange(&buffer_[end_], &buffer_[end_ + 1]);
+
+ ShrinkCapacityIfNecessary();
+
+ // See pop_front comment about why this is here.
+ IncrementGeneration();
+ }
+
+ // ---------------------------------------------------------------------------
+ // General operations.
+
+ void swap(circular_deque& other) {
+ std::swap(buffer_, other.buffer_);
+ std::swap(begin_, other.begin_);
+ std::swap(end_, other.end_);
+ IncrementGeneration();
+ }
+
+ friend void swap(circular_deque& lhs, circular_deque& rhs) { lhs.swap(rhs); }
+
+ private:
+ friend internal::circular_deque_iterator<T>;
+ friend internal::circular_deque_const_iterator<T>;
+
+ // Moves the items in the given circular buffer to the current one. The
+ // source is moved from so will become invalid. The destination buffer must
+ // have already been allocated with enough size.
+ static void MoveBuffer(VectorBuffer& from_buf,
+ size_t from_begin,
+ size_t from_end,
+ VectorBuffer* to_buf,
+ size_t* to_begin,
+ size_t* to_end) {
+ size_t from_capacity = from_buf.capacity();
+
+ *to_begin = 0;
+ if (from_begin < from_end) {
+ // Contiguous.
+ from_buf.MoveRange(&from_buf[from_begin], &from_buf[from_end],
+ to_buf->begin());
+ *to_end = from_end - from_begin;
+ } else if (from_begin > from_end) {
+ // Discontiguous, copy the right side to the beginning of the new buffer.
+ from_buf.MoveRange(&from_buf[from_begin], &from_buf[from_capacity],
+ to_buf->begin());
+ size_t right_size = from_capacity - from_begin;
+ // Append the left side.
+ from_buf.MoveRange(&from_buf[0], &from_buf[from_end],
+ &(*to_buf)[right_size]);
+ *to_end = right_size + from_end;
+ } else {
+ // No items.
+ *to_end = 0;
+ }
+ }
+
+ // Expands the buffer size. This assumes the size is larger than the
+ // number of elements in the vector (it won't call delete on anything).
+ void SetCapacityTo(size_t new_capacity) {
+ // Use the capacity + 1 as the internal buffer size to differentiate
+ // empty and full (see definition of buffer_ below).
+ VectorBuffer new_buffer(new_capacity + 1);
+ MoveBuffer(buffer_, begin_, end_, &new_buffer, &begin_, &end_);
+ buffer_ = std::move(new_buffer);
+ }
+ void ExpandCapacityIfNecessary(size_t additional_elts) {
+ size_t min_new_capacity = size() + additional_elts;
+ if (capacity() >= min_new_capacity)
+ return; // Already enough room.
+
+ min_new_capacity =
+ std::max(min_new_capacity, internal::kCircularBufferInitialCapacity);
+
+ // std::vector always grows by at least 50%. WTF::Deque grows by at least
+ // 25%. We expect queue workloads to generally stay at a similar size and
+ // grow less than a vector might, so use 25%.
+ size_t new_capacity =
+ std::max(min_new_capacity, capacity() + capacity() / 4);
+ SetCapacityTo(new_capacity);
+ }
+
+ void ShrinkCapacityIfNecessary() {
+ // Don't auto-shrink below this size.
+ if (capacity() <= internal::kCircularBufferInitialCapacity)
+ return;
+
+ // Shrink when 100% of the size() is wasted.
+ size_t sz = size();
+ size_t empty_spaces = capacity() - sz;
+ if (empty_spaces < sz)
+ return;
+
+ // Leave 1/4 the size as free capacity, not going below the initial
+ // capacity.
+ size_t new_capacity =
+ std::max(internal::kCircularBufferInitialCapacity, sz + sz / 4);
+ if (new_capacity < capacity()) {
+ // Count extra item to convert to internal capacity.
+ SetCapacityTo(new_capacity);
+ }
+ }
+
+ // Backend for clear() but does not resize the internal buffer.
+ void ClearRetainCapacity() {
+ // This can't resize(0) because that requires a default constructor to
+ // compile, which not all contained classes may implement.
+ DestructRange(begin_, end_);
+ begin_ = 0;
+ end_ = 0;
+ IncrementGeneration();
+ }
+
+ // Calls destructors for the given begin->end indices. The indices may wrap
+ // around. The buffer is not resized, and the begin_ and end_ members are
+ // not changed.
+ void DestructRange(size_t begin, size_t end) {
+ if (end == begin) {
+ return;
+ } else if (end > begin) {
+ buffer_.DestructRange(&buffer_[begin], &buffer_[end]);
+ } else {
+ buffer_.DestructRange(&buffer_[begin], &buffer_[buffer_.capacity()]);
+ buffer_.DestructRange(&buffer_[0], &buffer_[end]);
+ }
+ }
+
+  // Makes room for |count| items starting at |*insert_begin|. Since iterators
+  // are not stable across buffer resizes, |*insert_begin| will be updated to
+  // point to the beginning of the newly opened gap in the (possibly new)
+  // buffer (it's in/out), and |*insert_end| will be set to the end of the
+  // newly opened gap (it's out-only).
+ void MakeRoomFor(size_t count, iterator* insert_begin, iterator* insert_end) {
+ if (count == 0) {
+ *insert_end = *insert_begin;
+ return;
+ }
+
+ // The offset from the beginning will be stable across reallocations.
+ size_t begin_offset = insert_begin->OffsetFromBegin();
+ ExpandCapacityIfNecessary(count);
+
+ insert_begin->index_ = (begin_ + begin_offset) % buffer_.capacity();
+ *insert_end =
+ iterator(this, (insert_begin->index_ + count) % buffer_.capacity());
+
+ // Update the new end and prepare the iterators for copying.
+ iterator src = end();
+ end_ = (end_ + count) % buffer_.capacity();
+ iterator dest = end();
+
+ // Move the elements. This will always involve shifting logically to the
+ // right, so move in a right-to-left order.
+ while (true) {
+ if (src == *insert_begin)
+ break;
+ --src;
+ --dest;
+ buffer_.MoveRange(&buffer_[src.index_], &buffer_[src.index_ + 1],
+ &buffer_[dest.index_]);
+ }
+ }
+
+#if DCHECK_IS_ON()
+ // Asserts the given index is dereferencable. The index is an index into the
+ // buffer, not an index used by operator[] or at() which will be offsets from
+ // begin.
+ void CheckValidIndex(size_t i) const {
+ if (begin_ <= end_)
+ DCHECK(i >= begin_ && i < end_);
+ else
+ DCHECK((i >= begin_ && i < buffer_.capacity()) || i < end_);
+ }
+
+ // Asserts the given index is either dereferencable or points to end().
+ void CheckValidIndexOrEnd(size_t i) const {
+ if (i != end_)
+ CheckValidIndex(i);
+ }
+
+ void ValidateIterator(const const_iterator& i) const {
+ DCHECK(i.parent_deque_ == this);
+ i.CheckUnstableUsage();
+ }
+
+ // See generation_ below.
+ void IncrementGeneration() { generation_++; }
+#else
+ // No-op versions of these functions for release builds.
+ void CheckValidIndex(size_t) const {}
+ void CheckValidIndexOrEnd(size_t) const {}
+ void ValidateIterator(const const_iterator& i) const {}
+ void IncrementGeneration() {}
+#endif
+
+ // Danger, the buffer_.capacity() is the "internal capacity" which is
+ // capacity() + 1 since there is an extra item to indicate the end. Otherwise
+ // being completely empty and completely full are indistinguishable (begin ==
+ // end). We could add a separate flag to avoid it, but that adds significant
+ // extra complexity since every computation will have to check for it. Always
+ // keeping one extra unused element in the buffer makes iterator computations
+ // much simpler.
+ //
+ // Container internal code will want to use buffer_.capacity() for offset
+ // computations rather than capacity().
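+  //
+  // For example, a deque with capacity() == 3 has buffer_.capacity() == 4;
+  // with begin_ == 2 and end_ == 1 it holds three items at indices 2, 3 and
+  // 0, while begin_ == end_ always means empty.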
+ VectorBuffer buffer_;
+ size_type begin_ = 0;
+ size_type end_ = 0;
+
+#if DCHECK_IS_ON()
+ // Incremented every time a modification is made that could affect iterator
+ // invalidations.
+ uint64_t generation_ = 0;
+#endif
+};
+
+// Implementations of base::Erase[If] (see base/stl_util.h).
+template <class T, class Value>
+void Erase(circular_deque<T>& container, const Value& value) {
+ container.erase(std::remove(container.begin(), container.end(), value),
+ container.end());
+}
+
+template <class T, class Predicate>
+void EraseIf(circular_deque<T>& container, Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
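+
+// For example (sketch):
+//
+//   base::circular_deque<int> d = {1, 2, 3, 2};
+//   base::Erase(d, 2);                              // d is now {1, 3}.
+//   base::EraseIf(d, [](int i) { return i > 1; });  // d is now {1}.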
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_CIRCULAR_DEQUE_H_
diff --git a/security/sandbox/chromium/base/containers/span.h b/security/sandbox/chromium/base/containers/span.h
new file mode 100644
index 0000000000..ce2e3c47e5
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/span.h
@@ -0,0 +1,530 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_SPAN_H_
+#define BASE_CONTAINERS_SPAN_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <array>
+#include <iterator>
+#include <limits>
+#include <type_traits>
+#include <utility>
+
+#include "base/containers/checked_iterators.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+// [views.constants]
+constexpr size_t dynamic_extent = std::numeric_limits<size_t>::max();
+
+template <typename T, size_t Extent = dynamic_extent>
+class span;
+
+namespace internal {
+
+template <typename T>
+struct ExtentImpl : std::integral_constant<size_t, dynamic_extent> {};
+
+template <typename T, size_t N>
+struct ExtentImpl<T[N]> : std::integral_constant<size_t, N> {};
+
+template <typename T, size_t N>
+struct ExtentImpl<std::array<T, N>> : std::integral_constant<size_t, N> {};
+
+template <typename T, size_t N>
+struct ExtentImpl<base::span<T, N>> : std::integral_constant<size_t, N> {};
+
+template <typename T>
+using Extent = ExtentImpl<std::remove_cv_t<std::remove_reference_t<T>>>;
+
+template <typename T>
+struct IsSpanImpl : std::false_type {};
+
+template <typename T, size_t Extent>
+struct IsSpanImpl<span<T, Extent>> : std::true_type {};
+
+template <typename T>
+using IsSpan = IsSpanImpl<std::decay_t<T>>;
+
+template <typename T>
+struct IsStdArrayImpl : std::false_type {};
+
+template <typename T, size_t N>
+struct IsStdArrayImpl<std::array<T, N>> : std::true_type {};
+
+template <typename T>
+using IsStdArray = IsStdArrayImpl<std::decay_t<T>>;
+
+template <typename T>
+using IsCArray = std::is_array<std::remove_reference_t<T>>;
+
+template <typename From, typename To>
+using IsLegalDataConversion = std::is_convertible<From (*)[], To (*)[]>;
+
+template <typename Container, typename T>
+using ContainerHasConvertibleData = IsLegalDataConversion<
+ std::remove_pointer_t<decltype(base::data(std::declval<Container>()))>,
+ T>;
+
+template <typename Container>
+using ContainerHasIntegralSize =
+ std::is_integral<decltype(base::size(std::declval<Container>()))>;
+
+template <typename From, size_t FromExtent, typename To, size_t ToExtent>
+using EnableIfLegalSpanConversion =
+ std::enable_if_t<(ToExtent == dynamic_extent || ToExtent == FromExtent) &&
+ IsLegalDataConversion<From, To>::value>;
+
+// SFINAE check if Array can be converted to a span<T>.
+template <typename Array, typename T, size_t Extent>
+using EnableIfSpanCompatibleArray =
+ std::enable_if_t<(Extent == dynamic_extent ||
+ Extent == internal::Extent<Array>::value) &&
+ ContainerHasConvertibleData<Array, T>::value>;
+
+// SFINAE check if Container can be converted to a span<T>.
+template <typename Container, typename T>
+using IsSpanCompatibleContainer =
+ std::conditional_t<!IsSpan<Container>::value &&
+ !IsStdArray<Container>::value &&
+ !IsCArray<Container>::value &&
+ ContainerHasConvertibleData<Container, T>::value &&
+ ContainerHasIntegralSize<Container>::value,
+ std::true_type,
+ std::false_type>;
+
+template <typename Container, typename T>
+using EnableIfSpanCompatibleContainer =
+ std::enable_if_t<IsSpanCompatibleContainer<Container, T>::value>;
+
+template <typename Container, typename T, size_t Extent>
+using EnableIfSpanCompatibleContainerAndSpanIsDynamic =
+ std::enable_if_t<IsSpanCompatibleContainer<Container, T>::value &&
+ Extent == dynamic_extent>;
+
+// A helper template for storing the size of a span. Spans with static extents
+// don't require additional storage, since the extent itself is specified in the
+// template parameter.
+template <size_t Extent>
+class ExtentStorage {
+ public:
+ constexpr explicit ExtentStorage(size_t size) noexcept {}
+ constexpr size_t size() const noexcept { return Extent; }
+};
+
+// Specialization of ExtentStorage for dynamic extents, which do require
+// explicit storage for the size.
+template <>
+struct ExtentStorage<dynamic_extent> {
+ constexpr explicit ExtentStorage(size_t size) noexcept : size_(size) {}
+ constexpr size_t size() const noexcept { return size_; }
+
+ private:
+ size_t size_;
+};
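+
+// As a consequence, sizeof(span<int, 4>) is typically just sizeof(int*) (the
+// empty base class is optimized away), while a dynamic-extent span<int> also
+// stores a size_t.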
+
+} // namespace internal
+
+// A span is a value type that represents an array of elements of type T. Since
+// it only consists of a pointer to memory with an associated size, it is very
+// light-weight. It is cheap to construct, copy, move and use spans, so that
+// users are encouraged to use it as a pass-by-value parameter. A span does not
+// own the underlying memory, so care must be taken to ensure that a span does
+// not outlive the backing store.
+//
+// span is somewhat analogous to StringPiece, but with arbitrary element types,
+// allowing mutation if T is non-const.
+//
+// span is implicitly convertible from C++ arrays, as well as most [1]
+// container-like types that provide a data() and size() method (such as
+// std::vector<T>). A mutable span<T> can also be implicitly converted to an
+// immutable span<const T>.
+//
+// Consider using a span for functions that take a data pointer and size
+// parameter: it allows the function to still act on an array-like type, while
+// allowing the caller code to be a bit more concise.
+//
+// For read-only data access pass a span<const T>: the caller can supply either
+// a span<const T> or a span<T>, while the callee will have a read-only view.
+// For read-write access a mutable span<T> is required.
+//
+// Without span:
+// Read-Only:
+// // std::string HexEncode(const uint8_t* data, size_t size);
+// std::vector<uint8_t> data_buffer = GenerateData();
+// std::string r = HexEncode(data_buffer.data(), data_buffer.size());
+//
+// Mutable:
+// // ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt, Args...);
+// char str_buffer[100];
+// SafeSNPrintf(str_buffer, sizeof(str_buffer), "Pi ~= %lf", 3.14);
+//
+// With span:
+// Read-Only:
+// // std::string HexEncode(base::span<const uint8_t> data);
+// std::vector<uint8_t> data_buffer = GenerateData();
+// std::string r = HexEncode(data_buffer);
+//
+// Mutable:
+// // ssize_t SafeSNPrintf(base::span<char>, const char* fmt, Args...);
+// char str_buffer[100];
+// SafeSNPrintf(str_buffer, "Pi ~= %lf", 3.14);
+//
+// Spans with "const" and pointers
+// -------------------------------
+//
+// Const and pointers can get confusing. Here are vectors of pointers and their
+// corresponding spans:
+//
+// const std::vector<int*> => base::span<int* const>
+// std::vector<const int*> => base::span<const int*>
+// const std::vector<const int*> => base::span<const int* const>
+//
+// Differences from the C++20 draft
+// --------------------------------
+//
+// http://eel.is/c++draft/views contains the latest C++20 draft of std::span.
+// Chromium tries to follow the draft as closely as possible. Differences
+// between the draft and the implementation are documented in subsections
+// below.
+//
+// Differences from [span.objectrep]:
+// - as_bytes() and as_writable_bytes() return spans of uint8_t instead of
+// std::byte (std::byte is a C++17 feature)
+//
+// Differences from [span.cons]:
+// - Constructing a static span (i.e. Extent != dynamic_extent) from a
+//   dynamically sized container (e.g. std::vector) requires an explicit
+//   conversion (in the C++20 draft this is simply UB); see the sketch at the
+//   end of this comment block
+//
+// Differences from [span.obs]:
+// - empty() is marked with WARN_UNUSED_RESULT instead of [[nodiscard]]
+// ([[nodiscard]] is a C++17 feature)
+//
+// Furthermore, all constructors and methods are marked noexcept due to the lack
+// of exceptions in Chromium.
+//
+// Due to the lack of class template argument deduction guides in C++14,
+// appropriate make_span() utility functions are provided.
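+//
+// For example, the static-extent restriction from [span.cons] above means the
+// conversion must be spelled out (a sketch):
+//
+//   std::vector<int> v = {1, 2, 3};
+//   // base::span<int, 3> s1 = v;      // error: no implicit conversion
+//   auto s2 = base::make_span<3>(v);   // OK; CHECKs that v.size() == 3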
+
+// [span], class template span
+template <typename T, size_t Extent>
+class span : public internal::ExtentStorage<Extent> {
+ private:
+ using ExtentStorage = internal::ExtentStorage<Extent>;
+
+ public:
+ using element_type = T;
+ using value_type = std::remove_cv_t<T>;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ using pointer = T*;
+ using reference = T&;
+ using iterator = CheckedContiguousIterator<T>;
+ using const_iterator = CheckedContiguousConstIterator<T>;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ static constexpr size_t extent = Extent;
+
+ // [span.cons], span constructors, copy, assignment, and destructor
+ constexpr span() noexcept : ExtentStorage(0), data_(nullptr) {
+ static_assert(Extent == dynamic_extent || Extent == 0, "Invalid Extent");
+ }
+
+ constexpr span(T* data, size_t size) noexcept
+ : ExtentStorage(size), data_(data) {
+ CHECK(Extent == dynamic_extent || Extent == size);
+ }
+
+ // Artificially templatized to break ambiguity for span(ptr, 0).
+ template <typename = void>
+ constexpr span(T* begin, T* end) noexcept : span(begin, end - begin) {
+ // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+ CHECK(begin <= end);
+ }
+
+ template <
+ size_t N,
+ typename = internal::EnableIfSpanCompatibleArray<T (&)[N], T, Extent>>
+ constexpr span(T (&array)[N]) noexcept : span(base::data(array), N) {}
+
+ template <
+ size_t N,
+ typename = internal::
+ EnableIfSpanCompatibleArray<std::array<value_type, N>&, T, Extent>>
+ constexpr span(std::array<value_type, N>& array) noexcept
+ : span(base::data(array), N) {}
+
+ template <size_t N,
+ typename = internal::EnableIfSpanCompatibleArray<
+ const std::array<value_type, N>&,
+ T,
+ Extent>>
+ constexpr span(const std::array<value_type, N>& array) noexcept
+ : span(base::data(array), N) {}
+
+ // Conversion from a container that has compatible base::data() and integral
+ // base::size().
+ template <
+ typename Container,
+ typename =
+ internal::EnableIfSpanCompatibleContainerAndSpanIsDynamic<Container&,
+ T,
+ Extent>>
+ constexpr span(Container& container) noexcept
+ : span(base::data(container), base::size(container)) {}
+
+ template <
+ typename Container,
+ typename = internal::EnableIfSpanCompatibleContainerAndSpanIsDynamic<
+ const Container&,
+ T,
+ Extent>>
+ constexpr span(const Container& container) noexcept
+ : span(base::data(container), base::size(container)) {}
+
+ constexpr span(const span& other) noexcept = default;
+
+ // Conversions from spans of compatible types and extents: this allows a
+ // span<T> to be seamlessly used as a span<const T>, but not the other way
+ // around. If extent is not dynamic, OtherExtent has to be equal to Extent.
+ template <
+ typename U,
+ size_t OtherExtent,
+ typename =
+ internal::EnableIfLegalSpanConversion<U, OtherExtent, T, Extent>>
+ constexpr span(const span<U, OtherExtent>& other)
+ : span(other.data(), other.size()) {}
+
+ constexpr span& operator=(const span& other) noexcept = default;
+ ~span() noexcept = default;
+
+ // [span.sub], span subviews
+ template <size_t Count>
+ constexpr span<T, Count> first() const noexcept {
+ static_assert(Extent == dynamic_extent || Count <= Extent,
+ "Count must not exceed Extent");
+ CHECK(Extent != dynamic_extent || Count <= size());
+ return {data(), Count};
+ }
+
+ template <size_t Count>
+ constexpr span<T, Count> last() const noexcept {
+ static_assert(Extent == dynamic_extent || Count <= Extent,
+ "Count must not exceed Extent");
+ CHECK(Extent != dynamic_extent || Count <= size());
+ return {data() + (size() - Count), Count};
+ }
+
+ template <size_t Offset, size_t Count = dynamic_extent>
+ constexpr span<T,
+ (Count != dynamic_extent
+ ? Count
+ : (Extent != dynamic_extent ? Extent - Offset
+ : dynamic_extent))>
+ subspan() const noexcept {
+ static_assert(Extent == dynamic_extent || Offset <= Extent,
+ "Offset must not exceed Extent");
+ static_assert(Extent == dynamic_extent || Count == dynamic_extent ||
+ Count <= Extent - Offset,
+ "Count must not exceed Extent - Offset");
+ CHECK(Extent != dynamic_extent || Offset <= size());
+ CHECK(Extent != dynamic_extent || Count == dynamic_extent ||
+ Count <= size() - Offset);
+ return {data() + Offset, Count != dynamic_extent ? Count : size() - Offset};
+ }
+
+ constexpr span<T, dynamic_extent> first(size_t count) const noexcept {
+ // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+ CHECK(count <= size());
+ return {data(), count};
+ }
+
+ constexpr span<T, dynamic_extent> last(size_t count) const noexcept {
+ // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+ CHECK(count <= size());
+ return {data() + (size() - count), count};
+ }
+
+ constexpr span<T, dynamic_extent> subspan(size_t offset,
+ size_t count = dynamic_extent) const
+ noexcept {
+ // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+ CHECK(offset <= size());
+ CHECK(count == dynamic_extent || count <= size() - offset);
+ return {data() + offset, count != dynamic_extent ? count : size() - offset};
+ }
+
+ // [span.obs], span observers
+ constexpr size_t size() const noexcept { return ExtentStorage::size(); }
+ constexpr size_t size_bytes() const noexcept { return size() * sizeof(T); }
+ constexpr bool empty() const noexcept WARN_UNUSED_RESULT {
+ return size() == 0;
+ }
+
+ // [span.elem], span element access
+ constexpr T& operator[](size_t idx) const noexcept {
+ // Note: CHECK_LT is not constexpr, hence regular CHECK must be used.
+ CHECK(idx < size());
+ return *(data() + idx);
+ }
+
+ constexpr T& front() const noexcept {
+ static_assert(Extent == dynamic_extent || Extent > 0,
+ "Extent must not be 0");
+ CHECK(Extent != dynamic_extent || !empty());
+ return *data();
+ }
+
+ constexpr T& back() const noexcept {
+ static_assert(Extent == dynamic_extent || Extent > 0,
+ "Extent must not be 0");
+ CHECK(Extent != dynamic_extent || !empty());
+ return *(data() + size() - 1);
+ }
+
+ constexpr T* data() const noexcept { return data_; }
+
+ // [span.iter], span iterator support
+ constexpr iterator begin() const noexcept {
+ return iterator(data_, data_ + size());
+ }
+ constexpr iterator end() const noexcept {
+ return iterator(data_, data_ + size(), data_ + size());
+ }
+
+ constexpr const_iterator cbegin() const noexcept { return begin(); }
+ constexpr const_iterator cend() const noexcept { return end(); }
+
+ constexpr reverse_iterator rbegin() const noexcept {
+ return reverse_iterator(end());
+ }
+ constexpr reverse_iterator rend() const noexcept {
+ return reverse_iterator(begin());
+ }
+
+ constexpr const_reverse_iterator crbegin() const noexcept {
+ return const_reverse_iterator(cend());
+ }
+ constexpr const_reverse_iterator crend() const noexcept {
+ return const_reverse_iterator(cbegin());
+ }
+
+ private:
+ T* data_;
+};
+
+// span<T, Extent>::extent can not be declared inline prior to C++17, hence this
+// definition is required.
+template <class T, size_t Extent>
+constexpr size_t span<T, Extent>::extent;
+
+// [span.objectrep], views of object representation
+template <typename T, size_t X>
+span<const uint8_t, (X == dynamic_extent ? dynamic_extent : sizeof(T) * X)>
+as_bytes(span<T, X> s) noexcept {
+ return {reinterpret_cast<const uint8_t*>(s.data()), s.size_bytes()};
+}
+
+template <typename T,
+ size_t X,
+ typename = std::enable_if_t<!std::is_const<T>::value>>
+span<uint8_t, (X == dynamic_extent ? dynamic_extent : sizeof(T) * X)>
+as_writable_bytes(span<T, X> s) noexcept {
+ return {reinterpret_cast<uint8_t*>(s.data()), s.size_bytes()};
+}
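+
+// For example, viewing the object representation of a vector's elements
+// (a sketch):
+//
+//   std::vector<uint32_t> v(2);
+//   base::span<const uint8_t> bytes = base::as_bytes(base::make_span(v));
+//   // bytes.size() == 2 * sizeof(uint32_t) == 8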
+
+// Type-deducing helpers for constructing a span.
+template <int&... ExplicitArgumentBarrier, typename T>
+constexpr span<T> make_span(T* data, size_t size) noexcept {
+ return {data, size};
+}
+
+template <int&... ExplicitArgumentBarrier, typename T>
+constexpr span<T> make_span(T* begin, T* end) noexcept {
+ return {begin, end};
+}
+
+// make_span utility function that deduces both the span's value_type and extent
+// from the passed-in argument.
+//
+// Usage: auto span = base::make_span(...);
+template <int&... ExplicitArgumentBarrier, typename Container>
+constexpr auto make_span(Container&& container) noexcept {
+ using T =
+ std::remove_pointer_t<decltype(base::data(std::declval<Container>()))>;
+ using Extent = internal::Extent<Container>;
+ return span<T, Extent::value>(std::forward<Container>(container));
+}
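+
+// For example (a sketch):
+//
+//   std::vector<int> vec = {1, 2, 3};
+//   auto s1 = base::make_span(vec);  // deduces base::span<int>
+//   int arr[4] = {};
+//   auto s2 = base::make_span(arr);  // deduces base::span<int, 4>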
+
+// make_span utility function that allows callers to explicitly specify the
+// span's extent; the value_type is deduced automatically. This is useful when
+// passing a dynamically sized container to a method expecting static spans,
+// when the container is known to have the correct size.
+//
+// Note: This will CHECK that N indeed matches size(container).
+//
+// Usage: auto static_span = base::make_span<N>(...);
+template <size_t N, int&... ExplicitArgumentBarrier, typename Container>
+constexpr auto make_span(Container&& container) noexcept {
+ using T =
+ std::remove_pointer_t<decltype(base::data(std::declval<Container>()))>;
+ return span<T, N>(base::data(container), base::size(container));
+}
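+
+// For example (a sketch; GetBuffer() is a hypothetical helper returning a
+// std::vector<uint8_t> known to hold 16 bytes):
+//
+//   std::vector<uint8_t> buffer = GetBuffer();
+//   base::span<uint8_t, 16> key = base::make_span<16>(buffer);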
+
+} // namespace base
+
+// Note: std::tuple_size, std::tuple_element and std::get are specialized for
+// static spans, so that they can be used in C++17's structured bindings. While
+// we don't support C++17 yet, there is no harm in providing these
+// specializations already.
+namespace std {
+
+// [span.tuple], tuple interface
+#if defined(__clang__)
+// Due to https://llvm.org/PR39871 and https://llvm.org/PR41331 and their
+// respective fixes different versions of libc++ declare std::tuple_size and
+// std::tuple_element either as classes or structs. In order to be able to
+// specialize std::tuple_size and std::tuple_element for custom base types we
+// thus need to disable -Wmismatched-tags in order to support all build
+// configurations. Note that this is blessed by the standard in
+// https://timsong-cpp.github.io/cppwp/n4140/dcl.type.elab#3.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmismatched-tags"
+#endif
+template <typename T, size_t X>
+struct tuple_size<base::span<T, X>> : public integral_constant<size_t, X> {};
+
+template <typename T>
+struct tuple_size<base::span<T, base::dynamic_extent>>; // not defined
+
+template <size_t I, typename T, size_t X>
+struct tuple_element<I, base::span<T, X>> {
+ static_assert(
+ base::dynamic_extent != X,
+ "std::tuple_element<> not supported for base::span<T, dynamic_extent>");
+ static_assert(I < X,
+ "Index out of bounds in std::tuple_element<> (base::span)");
+ using type = T;
+};
+#if defined(__clang__)
+#pragma clang diagnostic pop // -Wmismatched-tags
+#endif
+
+template <size_t I, typename T, size_t X>
+constexpr T& get(base::span<T, X> s) noexcept {
+ static_assert(base::dynamic_extent != X,
+ "std::get<> not supported for base::span<T, dynamic_extent>");
+ static_assert(I < X, "Index out of bounds in std::get<> (base::span)");
+ return s[I];
+}
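+
+// For example (a sketch; indices are checked at compile time against the
+// static extent):
+//
+//   int arr[2] = {1, 2};
+//   auto s = base::make_span(arr);  // base::span<int, 2>
+//   int first = std::get<0>(s);     // 1
+//   // std::get<2>(s);              // error: index out of bounds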
+
+} // namespace std
+
+#endif // BASE_CONTAINERS_SPAN_H_
diff --git a/security/sandbox/chromium/base/containers/stack.h b/security/sandbox/chromium/base/containers/stack.h
new file mode 100644
index 0000000000..5cf06f8251
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/stack.h
@@ -0,0 +1,23 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_STACK_H_
+#define BASE_CONTAINERS_STACK_H_
+
+#include <stack>
+
+#include "base/containers/circular_deque.h"
+
+namespace base {
+
+// Provides a definition of base::stack that's like std::stack but uses a
+// base::circular_deque instead of std::deque. Since std::stack is just a
+// wrapper for an underlying type, we can just provide a typedef for it that
+// defaults to the base circular_deque.
+template <class T, class Container = circular_deque<T>>
+using stack = std::stack<T, Container>;
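+
+// For example (a sketch; the API is exactly that of std::stack):
+//
+//   base::stack<int> s;
+//   s.push(42);
+//   int top = s.top();  // 42
+//   s.pop();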
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_STACK_H_
diff --git a/security/sandbox/chromium/base/containers/util.h b/security/sandbox/chromium/base/containers/util.h
new file mode 100644
index 0000000000..435db0d1f8
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/util.h
@@ -0,0 +1,21 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_UTIL_H_
+#define BASE_CONTAINERS_UTIL_H_
+
+#include <stdint.h>
+
+namespace base {
+
+// TODO(crbug.com/817982): What we really need is for checked_math.h to be
+// able to do checked arithmetic on pointers.
+template <typename T>
+static inline uintptr_t get_uintptr(const T* t) {
+ return reinterpret_cast<uintptr_t>(t);
+}
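+
+// For example, vector_buffer.h combines this with checked_math.h to compute a
+// byte distance between two pointers into the same allocation (a sketch):
+//
+//   uintptr_t bytes =
+//       CheckSub(get_uintptr(from_end), get_uintptr(from_begin)).ValueOrDie();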
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_UTIL_H_
diff --git a/security/sandbox/chromium/base/containers/vector_buffer.h b/security/sandbox/chromium/base/containers/vector_buffer.h
new file mode 100644
index 0000000000..83cd2ac139
--- /dev/null
+++ b/security/sandbox/chromium/base/containers/vector_buffer.h
@@ -0,0 +1,188 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_VECTOR_BUFFERS_H_
+#define BASE_CONTAINERS_VECTOR_BUFFERS_H_
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "base/containers/util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/numerics/checked_math.h"
+
+namespace base {
+namespace internal {
+
+// Internal implementation detail of base/containers.
+//
+// Implements a vector-like buffer that holds a certain capacity of T. Unlike
+// std::vector, VectorBuffer never constructs or destructs its arguments, and
+// can't change sizes. But it does implement templates to assist in efficient
+// moving and destruction of those items manually.
+//
+// In particular, the destructor function does not iterate over the items if
+// T is trivially destructible. Moves are implemented as a memcpy/memmove for
+// trivially copyable objects (POD); otherwise, a std::move is used if
+// possible, and as a last resort it falls back to a copy. This behavior is
+// similar to std::vector.
+//
+// No special consideration is given to noexcept move constructors since
+// we compile without exceptions.
+//
+// The current API does not support moving overlapping ranges.
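+//
+// A sketch of typical manual element management (illustrative only; callers
+// construct and destroy elements themselves):
+//
+//   VectorBuffer<std::string> buf(2);        // uninitialized storage for 2
+//   new (buf.begin()) std::string("hello");  // construct in place
+//   VectorBuffer<std::string> bigger(4);
+//   VectorBuffer<std::string>::MoveRange(buf.begin(), buf.begin() + 1,
+//                                        bigger.begin());
+//   bigger.DestructRange(bigger.begin(), bigger.begin() + 1);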
+template <typename T>
+class VectorBuffer {
+ public:
+ constexpr VectorBuffer() = default;
+
+#if defined(__clang__) && !defined(__native_client__)
+ // This constructor converts an uninitialized void* to a T* which triggers
+ // clang Control Flow Integrity. Since this is as-designed, disable.
+ __attribute__((no_sanitize("cfi-unrelated-cast", "vptr")))
+#endif
+ VectorBuffer(size_t count)
+ : buffer_(reinterpret_cast<T*>(
+ malloc(CheckMul(sizeof(T), count).ValueOrDie()))),
+ capacity_(count) {
+ }
+ VectorBuffer(VectorBuffer&& other) noexcept
+ : buffer_(other.buffer_), capacity_(other.capacity_) {
+ other.buffer_ = nullptr;
+ other.capacity_ = 0;
+ }
+
+ ~VectorBuffer() { free(buffer_); }
+
+ VectorBuffer& operator=(VectorBuffer&& other) {
+ free(buffer_);
+ buffer_ = other.buffer_;
+ capacity_ = other.capacity_;
+
+ other.buffer_ = nullptr;
+ other.capacity_ = 0;
+ return *this;
+ }
+
+ size_t capacity() const { return capacity_; }
+
+ T& operator[](size_t i) {
+ // TODO(crbug.com/817982): Some call sites (at least circular_deque.h) are
+ // calling this with `i == capacity_` as a way of getting `end()`. Therefore
+ // we have to allow this for now (`i <= capacity_`), until we fix those call
+ // sites to use real iterators. This comment applies here and to `const T&
+ // operator[]`, below.
+ CHECK_LE(i, capacity_);
+ return buffer_[i];
+ }
+
+ const T& operator[](size_t i) const {
+ CHECK_LE(i, capacity_);
+ return buffer_[i];
+ }
+
+ T* begin() { return buffer_; }
+ T* end() { return &buffer_[capacity_]; }
+
+ // DestructRange ------------------------------------------------------------
+
+ // Trivially destructible objects need not have their destructors called.
+ template <typename T2 = T,
+ typename std::enable_if<std::is_trivially_destructible<T2>::value,
+ int>::type = 0>
+ void DestructRange(T* begin, T* end) {}
+
+ // Non-trivially destructible objects must have their destructors called
+ // individually.
+ template <typename T2 = T,
+ typename std::enable_if<!std::is_trivially_destructible<T2>::value,
+ int>::type = 0>
+ void DestructRange(T* begin, T* end) {
+ CHECK_LE(begin, end);
+ while (begin != end) {
+ begin->~T();
+ begin++;
+ }
+ }
+
+ // MoveRange ----------------------------------------------------------------
+ //
+ // The destructor will be called (as necessary) for all moved types. The
+ // ranges must not overlap.
+ //
+// The parameters are the begin and end (one past the last) of the input
+// buffer, and the address of the first element to copy to. There must be
+// sufficient room in the destination for all items in the range [begin, end).
+
+// Trivially copyable types can use memcpy. Trivial copyability implies a
+// trivial destructor, so we don't need to call destructors on the source.
+ template <typename T2 = T,
+ typename std::enable_if<base::is_trivially_copyable<T2>::value,
+ int>::type = 0>
+ static void MoveRange(T* from_begin, T* from_end, T* to) {
+ CHECK(!RangesOverlap(from_begin, from_end, to));
+ memcpy(
+ to, from_begin,
+ CheckSub(get_uintptr(from_end), get_uintptr(from_begin)).ValueOrDie());
+ }
+
+ // Not trivially copyable, but movable: call the move constructor and
+ // destruct the original.
+ template <typename T2 = T,
+ typename std::enable_if<std::is_move_constructible<T2>::value &&
+ !base::is_trivially_copyable<T2>::value,
+ int>::type = 0>
+ static void MoveRange(T* from_begin, T* from_end, T* to) {
+ CHECK(!RangesOverlap(from_begin, from_end, to));
+ while (from_begin != from_end) {
+ new (to) T(std::move(*from_begin));
+ from_begin->~T();
+ from_begin++;
+ to++;
+ }
+ }
+
+ // Not movable, not trivially copyable: call the copy constructor and
+ // destruct the original.
+ template <typename T2 = T,
+ typename std::enable_if<!std::is_move_constructible<T2>::value &&
+ !base::is_trivially_copyable<T2>::value,
+ int>::type = 0>
+ static void MoveRange(T* from_begin, T* from_end, T* to) {
+ CHECK(!RangesOverlap(from_begin, from_end, to));
+ while (from_begin != from_end) {
+ new (to) T(*from_begin);
+ from_begin->~T();
+ from_begin++;
+ to++;
+ }
+ }
+
+ private:
+ static bool RangesOverlap(const T* from_begin,
+ const T* from_end,
+ const T* to) {
+ const auto from_begin_uintptr = get_uintptr(from_begin);
+ const auto from_end_uintptr = get_uintptr(from_end);
+ const auto to_uintptr = get_uintptr(to);
+ return !(
+ to >= from_end ||
+ CheckAdd(to_uintptr, CheckSub(from_end_uintptr, from_begin_uintptr))
+ .ValueOrDie() <= from_begin_uintptr);
+ }
+
+ T* buffer_ = nullptr;
+ size_t capacity_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(VectorBuffer);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_CONTAINERS_VECTOR_BUFFERS_H_