Diffstat (limited to 'gfx/angle/checkout/src/common/third_party')
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/base_export.h  13
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/containers/mru_cache.h  275
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/logging.h  26
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/macros.h  17
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/no_destructor.h  106
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math.h  384
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math_impl.h  641
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math.h  270
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math_impl.h  368
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/math_constants.h  20
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/ranges.h  39
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions.h  403
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_arm_impl.h  60
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_impl.h  893
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math.h  12
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_arm_impl.h  131
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_clang_gcc_impl.h  182
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_shared_impl.h  227
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.cc  245
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.h  36
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/sys_byteorder.h  49
-rw-r--r--  gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.cpp  339
-rw-r--r--  gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.h  57
-rw-r--r--  gfx/angle/checkout/src/common/third_party/xxhash/xxhash.c  1030
-rw-r--r--  gfx/angle/checkout/src/common/third_party/xxhash/xxhash.h  341
25 files changed, 6164 insertions, 0 deletions
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/base_export.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/base_export.h
new file mode 100644
index 0000000000..426047a992
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/base_export.h
@@ -0,0 +1,13 @@
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// base_export.h: Compatibility hacks for importing Chromium's base/SHA1.
+
+#ifndef ANGLEBASE_BASE_EXPORT_H_
+#define ANGLEBASE_BASE_EXPORT_H_
+
+#define ANGLEBASE_EXPORT
+
+#endif // ANGLEBASE_BASE_EXPORT_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/containers/mru_cache.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/containers/mru_cache.h
new file mode 100644
index 0000000000..30b564aff6
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/containers/mru_cache.h
@@ -0,0 +1,275 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains a template for a Most Recently Used cache that allows
+// constant-time access to items using a key, but easy identification of the
+// least-recently-used items for removal. Each key can only be associated with
+// one payload item at a time.
+//
+// The key object will be stored twice, so it should support efficient copying.
+//
+// NOTE: While all operations are O(1), this code is written for
+// legibility rather than optimality. If future profiling identifies this as
+// a bottleneck, there is room for smaller values of 1 in the O(1). :]
+
+#ifndef ANGLEBASE_CONTAINERS_MRU_CACHE_H_
+#define ANGLEBASE_CONTAINERS_MRU_CACHE_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <functional>
+#include <list>
+#include <map>
+#include <unordered_map>
+#include <utility>
+
+#include "anglebase/logging.h"
+#include "anglebase/macros.h"
+
+namespace angle
+{
+
+namespace base
+{
+
+// MRUCacheBase ----------------------------------------------------------------
+
+// This template is used to standardize map type containers that can be used
+// by MRUCacheBase. This level of indirection is necessary because of the way
+// that template template params and default template params interact.
+template <class KeyType, class ValueType, class CompareType>
+struct MRUCacheStandardMap
+{
+ typedef std::map<KeyType, ValueType, CompareType> Type;
+};
+
+// Base class for the MRU cache specializations defined below.
+template <class KeyType,
+ class PayloadType,
+ class HashOrCompareType,
+ template <typename, typename, typename> class MapType = MRUCacheStandardMap>
+class MRUCacheBase
+{
+ public:
+ // The payload of the list. This maintains a copy of the key so we can
+ // efficiently delete things given an element of the list.
+ typedef std::pair<KeyType, PayloadType> value_type;
+
+ private:
+ typedef std::list<value_type> PayloadList;
+ typedef
+ typename MapType<KeyType, typename PayloadList::iterator, HashOrCompareType>::Type KeyIndex;
+
+ public:
+ typedef typename PayloadList::size_type size_type;
+
+ typedef typename PayloadList::iterator iterator;
+ typedef typename PayloadList::const_iterator const_iterator;
+ typedef typename PayloadList::reverse_iterator reverse_iterator;
+ typedef typename PayloadList::const_reverse_iterator const_reverse_iterator;
+
+ enum
+ {
+ NO_AUTO_EVICT = 0
+ };
+
+    // The max_size is the size to which the cache will prune its members when
+    // a new item is inserted. If the caller wants to manage this itself (for
+    // example, maybe it has special work to do when something is evicted), it
+    // can pass NO_AUTO_EVICT to leave the cache size unrestricted.
+ explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {}
+
+ virtual ~MRUCacheBase() {}
+
+ size_type max_size() const { return max_size_; }
+
+ // Inserts a payload item with the given key. If an existing item has
+ // the same key, it is removed prior to insertion. An iterator indicating the
+ // inserted item will be returned (this will always be the front of the list).
+ //
+ // The payload will be forwarded.
+ template <typename Payload>
+ iterator Put(const KeyType &key, Payload &&payload)
+ {
+ // Remove any existing payload with that key.
+ typename KeyIndex::iterator index_iter = index_.find(key);
+ if (index_iter != index_.end())
+ {
+ // Erase the reference to it. The index reference will be replaced in the
+ // code below.
+ Erase(index_iter->second);
+ }
+ else if (max_size_ != NO_AUTO_EVICT)
+ {
+ // New item is being inserted which might make it larger than the maximum
+ // size: kick the oldest thing out if necessary.
+ ShrinkToSize(max_size_ - 1);
+ }
+
+ ordering_.emplace_front(key, std::forward<Payload>(payload));
+ index_.emplace(key, ordering_.begin());
+ return ordering_.begin();
+ }
+
+ // Retrieves the contents of the given key, or end() if not found. This method
+ // has the side effect of moving the requested item to the front of the
+ // recency list.
+ iterator Get(const KeyType &key)
+ {
+ typename KeyIndex::iterator index_iter = index_.find(key);
+ if (index_iter == index_.end())
+ return end();
+ typename PayloadList::iterator iter = index_iter->second;
+
+ // Move the touched item to the front of the recency ordering.
+ ordering_.splice(ordering_.begin(), ordering_, iter);
+ return ordering_.begin();
+ }
+
+    // Retrieves the item associated with the given key, or end() if not
+    // found, without affecting the recency ordering (unlike Get).
+ iterator Peek(const KeyType &key)
+ {
+ typename KeyIndex::const_iterator index_iter = index_.find(key);
+ if (index_iter == index_.end())
+ return end();
+ return index_iter->second;
+ }
+
+ const_iterator Peek(const KeyType &key) const
+ {
+ typename KeyIndex::const_iterator index_iter = index_.find(key);
+ if (index_iter == index_.end())
+ return end();
+ return index_iter->second;
+ }
+
+    // Exchanges the contents of |this| with the contents of |other|.
+ void Swap(MRUCacheBase &other)
+ {
+ ordering_.swap(other.ordering_);
+ index_.swap(other.index_);
+ std::swap(max_size_, other.max_size_);
+ }
+
+ // Erases the item referenced by the given iterator. An iterator to the item
+ // following it will be returned. The iterator must be valid.
+ iterator Erase(iterator pos)
+ {
+ index_.erase(pos->first);
+ return ordering_.erase(pos);
+ }
+
+ // MRUCache entries are often processed in reverse order, so we add this
+ // convenience function (not typically defined by STL containers).
+ reverse_iterator Erase(reverse_iterator pos)
+ {
+ // We have to actually give it the incremented iterator to delete, since
+ // the forward iterator that base() returns is actually one past the item
+ // being iterated over.
+ return reverse_iterator(Erase((++pos).base()));
+ }
+
+ // Shrinks the cache so it only holds |new_size| items. If |new_size| is
+    // greater than or equal to the current number of items, this does nothing.
+ void ShrinkToSize(size_type new_size)
+ {
+ for (size_type i = size(); i > new_size; i--)
+ Erase(rbegin());
+ }
+
+ // Deletes everything from the cache.
+ void Clear()
+ {
+ index_.clear();
+ ordering_.clear();
+ }
+
+ // Returns the number of elements in the cache.
+ size_type size() const
+ {
+ // We don't use ordering_.size() for the return value because
+ // (as a linked list) it can be O(n).
+ DCHECK(index_.size() == ordering_.size());
+ return index_.size();
+ }
+
+ // Allows iteration over the list. Forward iteration starts with the most
+ // recent item and works backwards.
+ //
+ // Note that since these iterators are actually iterators over a list, you
+ // can keep them as you insert or delete things (as long as you don't delete
+ // the one you are pointing to) and they will still be valid.
+ iterator begin() { return ordering_.begin(); }
+ const_iterator begin() const { return ordering_.begin(); }
+ iterator end() { return ordering_.end(); }
+ const_iterator end() const { return ordering_.end(); }
+
+ reverse_iterator rbegin() { return ordering_.rbegin(); }
+ const_reverse_iterator rbegin() const { return ordering_.rbegin(); }
+ reverse_iterator rend() { return ordering_.rend(); }
+ const_reverse_iterator rend() const { return ordering_.rend(); }
+
+ bool empty() const { return ordering_.empty(); }
+
+ private:
+ PayloadList ordering_;
+ KeyIndex index_;
+
+ size_type max_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(MRUCacheBase);
+};
+
+// MRUCache --------------------------------------------------------------------
+
+// A container that does not do anything to free its data. Use this when storing
+// value types (as opposed to pointers) in the list.
+template <class KeyType, class PayloadType, class CompareType = std::less<KeyType>>
+class MRUCache : public MRUCacheBase<KeyType, PayloadType, CompareType>
+{
+ private:
+ using ParentType = MRUCacheBase<KeyType, PayloadType, CompareType>;
+
+ public:
+ // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
+ explicit MRUCache(typename ParentType::size_type max_size) : ParentType(max_size) {}
+ virtual ~MRUCache() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MRUCache);
+};
+
+// HashingMRUCache ------------------------------------------------------------
+
+template <class KeyType, class ValueType, class HashType>
+struct MRUCacheHashMap
+{
+ typedef std::unordered_map<KeyType, ValueType, HashType> Type;
+};
+
+// This class is similar to MRUCache, except that it uses std::unordered_map as
+// the map type instead of std::map. Note that your KeyType must be hashable to
+// use this cache or you need to provide a hashing class.
+template <class KeyType, class PayloadType, class HashType = std::hash<KeyType>>
+class HashingMRUCache : public MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap>
+{
+ private:
+ using ParentType = MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap>;
+
+ public:
+ // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
+ explicit HashingMRUCache(typename ParentType::size_type max_size) : ParentType(max_size) {}
+    ~HashingMRUCache() override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HashingMRUCache);
+};
+
+} // namespace base
+
+} // namespace angle
+
+#endif // ANGLEBASE_CONTAINERS_MRU_CACHE_H_
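
For orientation, here is a minimal usage sketch of the MRUCache template added above. The function, keys, and cache size are illustrative only (not part of the patch), and it assumes the anglebase headers are on the include path:

    #include <string>

    #include "anglebase/containers/mru_cache.h"

    void MruCacheExample()
    {
        // Hold at most 2 entries; a third Put() evicts the least recently used.
        angle::base::MRUCache<int, std::string> cache(2);
        cache.Put(1, std::string("one"));
        cache.Put(2, std::string("two"));
        cache.Get(1);                        // Touch key 1; key 2 becomes the LRU entry.
        cache.Put(3, std::string("three"));  // Evicts key 2.
        // Peek() does not touch the recency ordering; a miss returns end().
        const bool evicted = cache.Peek(2) == cache.end();  // true
        (void)evicted;
    }
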
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/logging.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/logging.h
new file mode 100644
index 0000000000..73f81e87f2
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/logging.h
@@ -0,0 +1,26 @@
+//
+// Copyright 2016 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// logging.h: Compatibility hacks for importing Chromium's base/numerics.
+
+#ifndef ANGLEBASE_LOGGING_H_
+#define ANGLEBASE_LOGGING_H_
+
+#include "common/debug.h"
+
+#ifndef DCHECK
+# define DCHECK(X) ASSERT(X)
+#endif
+
+#ifndef CHECK
+# define CHECK(X) ASSERT(X)
+#endif
+
+// Unfortunately ANGLE relies on ASSERT being an empty statement, which these libs don't respect.
+#ifndef NOTREACHED
+# define NOTREACHED() ({ UNREACHABLE(); })
+#endif
+
+#endif // ANGLEBASE_LOGGING_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/macros.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/macros.h
new file mode 100644
index 0000000000..06391784e4
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/macros.h
@@ -0,0 +1,17 @@
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// macros.h: Compatibility hacks for importing Chromium's MRUCache.
+
+#ifndef ANGLEBASE_MACROS_H_
+#define ANGLEBASE_MACROS_H_
+
+// A macro to disallow the copy constructor and operator= functions.
+// This should be used in the private: declarations for a class.
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName &) = delete; \
+ void operator=(const TypeName &) = delete
+
+#endif // ANGLEBASE_MACROS_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/no_destructor.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/no_destructor.h
new file mode 100644
index 0000000000..5090dd9817
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/no_destructor.h
@@ -0,0 +1,106 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANGLEBASE_NO_DESTRUCTOR_H_
+#define ANGLEBASE_NO_DESTRUCTOR_H_
+
+#include <new>
+#include <utility>
+
+namespace angle
+{
+
+namespace base
+{
+
+// A wrapper that makes it easy to create an object of type T with static
+// storage duration that:
+// - is only constructed on first access
+// - never invokes the destructor
+// in order to satisfy the styleguide ban on global constructors and
+// destructors.
+//
+// Runtime constant example:
+// const std::string& GetLineSeparator() {
+// // Forwards to std::string(size_t, char, const Allocator&) constructor.
+// static const base::NoDestructor<std::string> s(5, '-');
+// return *s;
+// }
+//
+// More complex initialization with a lambda:
+// const std::string& GetSessionNonce() {
+// static const base::NoDestructor<std::string> nonce([] {
+//     std::string s(16, '\0');
+// crypto::RandString(s.data(), s.size());
+// return s;
+// }());
+// return *nonce;
+// }
+//
+// NoDestructor<T> stores the object inline, so it also avoids a pointer
+// indirection and a malloc. Also note that since C++11 static local variable
+// initialization is thread-safe and so is this pattern. Code should prefer to
+// use NoDestructor<T> over:
+// - A function scoped static T* or T& that is dynamically initialized.
+// - A global base::LazyInstance<T>.
+//
+// Note that since the destructor is never run, this *will* leak memory if used
+// as a stack or member variable. Furthermore, a NoDestructor<T> should never
+// have global scope as that may require a static initializer.
+template <typename T>
+class NoDestructor
+{
+ public:
+ // Not constexpr; just write static constexpr T x = ...; if the value should
+ // be a constexpr.
+ template <typename... Args>
+ explicit NoDestructor(Args &&... args)
+ {
+ new (storage_) T(std::forward<Args>(args)...);
+ }
+
+ // Allows copy and move construction of the contained type, to allow
+ // construction from an initializer list, e.g. for std::vector.
+ explicit NoDestructor(const T &x) { new (storage_) T(x); }
+ explicit NoDestructor(T &&x) { new (storage_) T(std::move(x)); }
+
+ NoDestructor(const NoDestructor &) = delete;
+ NoDestructor &operator=(const NoDestructor &) = delete;
+
+ ~NoDestructor() = default;
+
+ const T &operator*() const { return *get(); }
+ T &operator*() { return *get(); }
+
+ const T *operator->() const { return get(); }
+ T *operator->() { return get(); }
+
+ const T *get() const { return reinterpret_cast<const T *>(storage_); }
+ T *get() { return reinterpret_cast<T *>(storage_); }
+
+ private:
+ alignas(T) char storage_[sizeof(T)];
+
+#if defined(LEAK_SANITIZER)
+ // TODO(https://crbug.com/812277): This is a hack to work around the fact
+ // that LSan doesn't seem to treat NoDestructor as a root for reachability
+ // analysis. This means that code like this:
+ // static base::NoDestructor<std::vector<int>> v({1, 2, 3});
+ // is considered a leak. Using the standard leak sanitizer annotations to
+ // suppress leaks doesn't work: std::vector is implicitly constructed before
+ // calling the base::NoDestructor constructor.
+ //
+ // Unfortunately, I haven't been able to demonstrate this issue in simpler
+ // reproductions: until that's resolved, hold an explicit pointer to the
+ // placement-new'd object in leak sanitizer mode to help LSan realize that
+ // objects allocated by the contained type are still reachable.
+ T *storage_ptr_ = reinterpret_cast<T *>(storage_);
+#endif // defined(LEAK_SANITIZER)
+};
+
+} // namespace base
+
+} // namespace angle
+
+#endif // ANGLEBASE_NO_DESTRUCTOR_H_
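
A short sketch of the intended usage pattern for NoDestructor, as described in the comments above: a function-local static built on first use and intentionally never destroyed. The function name and contents are hypothetical, not part of the patch:

    #include <map>
    #include <string>

    #include "anglebase/no_destructor.h"

    // Lazily builds a lookup table on first call; the map is never destroyed,
    // so no exit-time destructor has to be ordered against other globals.
    const std::map<int, std::string> &GetErrorNames()
    {
        static const angle::base::NoDestructor<std::map<int, std::string>> names(
            std::map<int, std::string>{{0, "none"}, {1, "invalid"}, {2, "out of memory"}});
        return *names;
    }
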
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math.h
new file mode 100644
index 0000000000..18bceb7468
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math.h
@@ -0,0 +1,384 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CHECKED_MATH_H_
+#define BASE_NUMERICS_CHECKED_MATH_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/checked_math_impl.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+template <typename T>
+class CheckedNumeric
+{
+ static_assert(std::is_arithmetic<T>::value, "CheckedNumeric<T>: T must be a numeric type.");
+
+ public:
+ template <typename Src>
+ friend class CheckedNumeric;
+
+ using type = T;
+
+ constexpr CheckedNumeric() = default;
+
+ // Copy constructor.
+ template <typename Src>
+ constexpr CheckedNumeric(const CheckedNumeric<Src> &rhs)
+ : state_(rhs.state_.value(), rhs.IsValid())
+ {}
+
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to CheckedNumerics to make them easier to use.
+ template <typename Src>
+ constexpr CheckedNumeric(Src value) // NOLINT(runtime/explicit)
+ : state_(value)
+ {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ }
+
+ // This is not an explicit constructor because we want a seamless conversion
+ // from StrictNumeric types.
+ template <typename Src>
+ constexpr CheckedNumeric(StrictNumeric<Src> value) // NOLINT(runtime/explicit)
+ : state_(static_cast<Src>(value))
+ {}
+
+ // IsValid() - The public API to test if a CheckedNumeric is currently valid.
+ // A range checked destination type can be supplied using the Dst template
+ // parameter.
+ template <typename Dst = T>
+ constexpr bool IsValid() const
+ {
+ return state_.is_valid() && IsValueInRangeForNumericType<Dst>(state_.value());
+ }
+
+ // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
+ // and is within the range supported by the destination type. Returns true if
+ // successful and false otherwise.
+ template <typename Dst>
+#if defined(__clang__) || defined(__GNUC__)
+ __attribute__((warn_unused_result))
+#elif defined(_MSC_VER)
+ _Check_return_
+#endif
+ constexpr bool
+ AssignIfValid(Dst *result) const
+ {
+ return BASE_NUMERICS_LIKELY(IsValid<Dst>())
+ ? ((*result = static_cast<Dst>(state_.value())), true)
+ : false;
+ }
+
+ // ValueOrDie() - The primary accessor for the underlying value. If the
+ // current state is not valid it will CHECK and crash.
+ // A range checked destination type can be supplied using the Dst template
+ // parameter, which will trigger a CHECK if the value is not in bounds for
+ // the destination.
+ // The CHECK behavior can be overridden by supplying a handler as a
+ // template parameter, for test code, etc. However, the handler cannot access
+ // the underlying value, and it is not available through other means.
+ template <typename Dst = T, class CheckHandler = CheckOnFailure>
+ constexpr StrictNumeric<Dst> ValueOrDie() const
+ {
+ return BASE_NUMERICS_LIKELY(IsValid<Dst>()) ? static_cast<Dst>(state_.value())
+ : CheckHandler::template HandleFailure<Dst>();
+ }
+
+ // ValueOrDefault(T default_value) - A convenience method that returns the
+ // current value if the state is valid, and the supplied default_value for
+ // any other state.
+ // A range checked destination type can be supplied using the Dst template
+ // parameter. WARNING: This function may fail to compile or CHECK at runtime
+ // if the supplied default_value is not within range of the destination type.
+ template <typename Dst = T, typename Src>
+ constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const
+ {
+ return BASE_NUMERICS_LIKELY(IsValid<Dst>()) ? static_cast<Dst>(state_.value())
+ : checked_cast<Dst>(default_value);
+ }
+
+ // Returns a checked numeric of the specified type, cast from the current
+ // CheckedNumeric. If the current state is invalid or the destination cannot
+ // represent the result then the returned CheckedNumeric will be invalid.
+ template <typename Dst>
+ constexpr CheckedNumeric<typename UnderlyingType<Dst>::type> Cast() const
+ {
+ return *this;
+ }
+
+ // This friend method is available solely for providing more detailed logging
+ // in the tests. Do not implement it in production code, because the
+ // underlying values may change at any time.
+ template <typename U>
+ friend U GetNumericValueForTest(const CheckedNumeric<U> &src);
+
+ // Prototypes for the supported arithmetic operator overloads.
+ template <typename Src>
+ constexpr CheckedNumeric &operator+=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator-=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator*=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator/=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator%=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator<<=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator>>=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator&=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator|=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator^=(const Src rhs);
+
+ constexpr CheckedNumeric operator-() const
+ {
+ // Use an optimized code path for a known run-time variable.
+ if (!MustTreatAsConstexpr(state_.value()) && std::is_signed<T>::value &&
+ std::is_floating_point<T>::value)
+ {
+ return FastRuntimeNegate();
+ }
+ // The negation of two's complement int min is int min.
+ const bool is_valid =
+ IsValid() && (!std::is_signed<T>::value || std::is_floating_point<T>::value ||
+ NegateWrapper(state_.value()) != std::numeric_limits<T>::lowest());
+ return CheckedNumeric<T>(NegateWrapper(state_.value()), is_valid);
+ }
+
+ constexpr CheckedNumeric operator~() const
+ {
+ return CheckedNumeric<decltype(InvertWrapper(T()))>(InvertWrapper(state_.value()),
+ IsValid());
+ }
+
+ constexpr CheckedNumeric Abs() const
+ {
+ return !IsValueNegative(state_.value()) ? *this : -*this;
+ }
+
+ template <typename U>
+ constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(const U rhs) const
+ {
+ return CheckMax(*this, rhs);
+ }
+
+ template <typename U>
+ constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(const U rhs) const
+ {
+ return CheckMin(*this, rhs);
+ }
+
+ // This function is available only for integral types. It returns an unsigned
+ // integer of the same width as the source type, containing the absolute value
+ // of the source, and properly handling signed min.
+ constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type> UnsignedAbs() const
+ {
+ return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
+ SafeUnsignedAbs(state_.value()), state_.is_valid());
+ }
+
+ constexpr CheckedNumeric &operator++()
+ {
+ *this += 1;
+ return *this;
+ }
+
+ constexpr CheckedNumeric operator++(int)
+ {
+ CheckedNumeric value = *this;
+ *this += 1;
+ return value;
+ }
+
+ constexpr CheckedNumeric &operator--()
+ {
+ *this -= 1;
+ return *this;
+ }
+
+ constexpr CheckedNumeric operator--(int)
+ {
+ // TODO(pkasting): Consider std::exchange() once it's constexpr in C++20.
+ const CheckedNumeric value = *this;
+ *this -= 1;
+ return value;
+ }
+
+ // These perform the actual math operations on the CheckedNumerics.
+ // Binary arithmetic operations.
+ template <template <typename, typename, typename> class M, typename L, typename R>
+ static constexpr CheckedNumeric MathOp(const L lhs, const R rhs)
+ {
+ using Math = typename MathWrapper<M, L, R>::math;
+ T result = 0;
+ const bool is_valid = Wrapper<L>::is_valid(lhs) && Wrapper<R>::is_valid(rhs) &&
+ Math::Do(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs), &result);
+ return CheckedNumeric<T>(result, is_valid);
+ }
+
+ // Assignment arithmetic operations.
+ template <template <typename, typename, typename> class M, typename R>
+ constexpr CheckedNumeric &MathOp(const R rhs)
+ {
+ using Math = typename MathWrapper<M, T, R>::math;
+ T result = 0; // Using T as the destination saves a range check.
+ const bool is_valid = state_.is_valid() && Wrapper<R>::is_valid(rhs) &&
+ Math::Do(state_.value(), Wrapper<R>::value(rhs), &result);
+ *this = CheckedNumeric<T>(result, is_valid);
+ return *this;
+ }
+
+ private:
+ CheckedNumericState<T> state_;
+
+ CheckedNumeric FastRuntimeNegate() const
+ {
+ T result;
+ const bool success = CheckedSubOp<T, T>::Do(T(0), state_.value(), &result);
+ return CheckedNumeric<T>(result, IsValid() && success);
+ }
+
+ template <typename Src>
+ constexpr CheckedNumeric(Src value, bool is_valid) : state_(value, is_valid)
+ {}
+
+ // These wrappers allow us to handle state the same way for both
+ // CheckedNumeric and POD arithmetic types.
+ template <typename Src>
+ struct Wrapper
+ {
+ static constexpr bool is_valid(Src) { return true; }
+ static constexpr Src value(Src value) { return value; }
+ };
+
+ template <typename Src>
+ struct Wrapper<CheckedNumeric<Src>>
+ {
+ static constexpr bool is_valid(const CheckedNumeric<Src> v) { return v.IsValid(); }
+ static constexpr Src value(const CheckedNumeric<Src> v) { return v.state_.value(); }
+ };
+
+ template <typename Src>
+ struct Wrapper<StrictNumeric<Src>>
+ {
+ static constexpr bool is_valid(const StrictNumeric<Src>) { return true; }
+ static constexpr Src value(const StrictNumeric<Src> v) { return static_cast<Src>(v); }
+ };
+};
+
+// Convenience functions to avoid the ugly template disambiguator syntax.
+template <typename Dst, typename Src>
+constexpr bool IsValidForType(const CheckedNumeric<Src> value)
+{
+ return value.template IsValid<Dst>();
+}
+
+template <typename Dst, typename Src>
+constexpr StrictNumeric<Dst> ValueOrDieForType(const CheckedNumeric<Src> value)
+{
+ return value.template ValueOrDie<Dst>();
+}
+
+template <typename Dst, typename Src, typename Default>
+constexpr StrictNumeric<Dst> ValueOrDefaultForType(const CheckedNumeric<Src> value,
+ const Default default_value)
+{
+ return value.template ValueOrDefault<Dst>(default_value);
+}
+
+// Convenience wrapper to return a new CheckedNumeric from the provided arithmetic
+// or CheckedNumericType.
+template <typename T>
+constexpr CheckedNumeric<typename UnderlyingType<T>::type> MakeCheckedNum(const T value)
+{
+ return value;
+}
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M, typename L, typename R>
+constexpr CheckedNumeric<typename MathWrapper<M, L, R>::type> CheckMathOp(const L lhs, const R rhs)
+{
+ using Math = typename MathWrapper<M, L, R>::math;
+ return CheckedNumeric<typename Math::result_type>::template MathOp<M>(lhs, rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+template <template <typename, typename, typename> class M, typename L, typename R, typename... Args>
+constexpr auto CheckMathOp(const L lhs, const R rhs, const Args... args)
+{
+ return CheckMathOp<M>(CheckMathOp<M>(lhs, rhs), args...);
+}
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Add, +, +=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Sub, -, -=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mul, *, *=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Div, /, /=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mod, %, %=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Lsh, <<, <<=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Rsh, >>, >>=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, And, &, &=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Or, |, |=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Xor, ^, ^=)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Max)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Min)
+
+// These are some extra StrictNumeric operators to support simple pointer
+// arithmetic with our result types. Since wrapping on a pointer is always
+// bad, we trigger the CHECK condition here.
+template <typename L, typename R>
+L *operator+(L *lhs, const StrictNumeric<R> rhs)
+{
+ const uintptr_t result =
+ CheckAdd(reinterpret_cast<uintptr_t>(lhs), CheckMul(sizeof(L), static_cast<R>(rhs)))
+ .template ValueOrDie<uintptr_t>();
+ return reinterpret_cast<L *>(result);
+}
+
+template <typename L, typename R>
+L *operator-(L *lhs, const StrictNumeric<R> rhs)
+{
+ const uintptr_t result =
+ CheckSub(reinterpret_cast<uintptr_t>(lhs), CheckMul(sizeof(L), static_cast<R>(rhs)))
+ .template ValueOrDie<uintptr_t>();
+ return reinterpret_cast<L *>(result);
+}
+
+} // namespace internal
+
+using internal::CheckAdd;
+using internal::CheckAnd;
+using internal::CheckDiv;
+using internal::CheckedNumeric;
+using internal::CheckLsh;
+using internal::CheckMax;
+using internal::CheckMin;
+using internal::CheckMod;
+using internal::CheckMul;
+using internal::CheckOr;
+using internal::CheckRsh;
+using internal::CheckSub;
+using internal::CheckXor;
+using internal::IsValidForType;
+using internal::MakeCheckedNum;
+using internal::ValueOrDefaultForType;
+using internal::ValueOrDieForType;
+
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_CHECKED_MATH_H_
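
A brief sketch of the CheckedNumeric API declared above: computing an RGBA buffer size with overflow detection rather than silent wraparound. ComputeBufferSize and its parameters are illustrative, not part of the patch:

    #include <cstdint>

    #include "anglebase/numerics/checked_math.h"

    // Returns false (leaving *out unset) if width * height * 4 overflows uint32_t.
    bool ComputeBufferSize(uint32_t width, uint32_t height, uint32_t *out)
    {
        angle::base::CheckedNumeric<uint32_t> size(width);
        size *= height;  // Validity is tracked through every operation.
        size *= 4;
        return size.AssignIfValid(out);
    }
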
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math_impl.h
new file mode 100644
index 0000000000..e4b6082770
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math_impl.h
@@ -0,0 +1,641 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CHECKED_MATH_IMPL_H_
+#define BASE_NUMERICS_CHECKED_MATH_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions.h"
+#include "anglebase/numerics/safe_math_shared_impl.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+template <typename T>
+constexpr bool CheckedAddImpl(T x, T y, T *result)
+{
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+    // Since signed overflow in x + y is undefined behavior, we compute the
+    // sum using the unsigned type of the same size, where wrap is defined.
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
+ const UnsignedDst ux = static_cast<UnsignedDst>(x);
+ const UnsignedDst uy = static_cast<UnsignedDst>(y);
+ const UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
+ // Addition is valid if the sign of (x + y) is equal to either that of x or
+ // that of y.
+    if (std::is_signed<T>::value ? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) < 0
+                                 : uresult < uy)  // Unsigned is either valid or wrapped around.
+ return false;
+ *result = static_cast<T>(uresult);
+ return true;
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAddOp
+{};
+
+template <typename T, typename U>
+struct CheckedAddOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ if constexpr (CheckedAddFastOp<T, U>::is_supported)
+ return CheckedAddFastOp<T, U>::Do(x, y, result);
+
+ // Double the underlying type up to a full machine word.
+ using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ using Promotion =
+ typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
+ IntegerBitsPlusSign<intptr_t>::value),
+ typename BigEnoughPromotion<T, U>::type, FastPromotion>::type;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ if (BASE_NUMERICS_UNLIKELY(!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y)))
+ {
+ return false;
+ }
+
+ Promotion presult = {};
+ bool is_valid = true;
+ if constexpr (IsIntegerArithmeticSafe<Promotion, T, U>::value)
+ {
+ presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
+ }
+ else
+ {
+ is_valid =
+ CheckedAddImpl(static_cast<Promotion>(x), static_cast<Promotion>(y), &presult);
+ }
+ if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
+ return false;
+ *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+template <typename T>
+constexpr bool CheckedSubImpl(T x, T y, T *result)
+{
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+    // Since signed overflow in x - y is undefined behavior, we compute the
+    // difference using the unsigned type of the same size, where wrap is defined.
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
+ const UnsignedDst ux = static_cast<UnsignedDst>(x);
+ const UnsignedDst uy = static_cast<UnsignedDst>(y);
+ const UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
+ // Subtraction is valid if either x and y have same sign, or (x-y) and x have
+ // the same sign.
+ if (std::is_signed<T>::value ? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) < 0 : x < y)
+ return false;
+ *result = static_cast<T>(uresult);
+ return true;
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedSubOp
+{};
+
+template <typename T, typename U>
+struct CheckedSubOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ if constexpr (CheckedSubFastOp<T, U>::is_supported)
+ return CheckedSubFastOp<T, U>::Do(x, y, result);
+
+ // Double the underlying type up to a full machine word.
+ using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ using Promotion =
+ typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
+ IntegerBitsPlusSign<intptr_t>::value),
+ typename BigEnoughPromotion<T, U>::type, FastPromotion>::type;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ if (BASE_NUMERICS_UNLIKELY(!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y)))
+ {
+ return false;
+ }
+
+ Promotion presult = {};
+ bool is_valid = true;
+ if constexpr (IsIntegerArithmeticSafe<Promotion, T, U>::value)
+ {
+ presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
+ }
+ else
+ {
+ is_valid =
+ CheckedSubImpl(static_cast<Promotion>(x), static_cast<Promotion>(y), &presult);
+ }
+ if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
+ return false;
+ *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+template <typename T>
+constexpr bool CheckedMulImpl(T x, T y, T *result)
+{
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ // Since the value of x*y is potentially undefined if we have a signed type,
+ // we compute it using the unsigned type of the same size.
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
+ const UnsignedDst ux = SafeUnsignedAbs(x);
+ const UnsignedDst uy = SafeUnsignedAbs(y);
+ const UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
+ const bool is_negative = std::is_signed<T>::value && static_cast<SignedDst>(x ^ y) < 0;
+ // We have a fast out for unsigned identity or zero on the second operand.
+ // After that it's an unsigned overflow check on the absolute value, with
+ // a +1 bound for a negative result.
+ if (uy > UnsignedDst(!std::is_signed<T>::value || is_negative) &&
+ ux > (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy)
+ return false;
+ *result = is_negative ? 0 - uresult : uresult;
+ return true;
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedMulOp
+{};
+
+template <typename T, typename U>
+struct CheckedMulOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ if constexpr (CheckedMulFastOp<T, U>::is_supported)
+ return CheckedMulFastOp<T, U>::Do(x, y, result);
+
+ using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ // Verify the destination type can hold the result (always true for 0).
+ if (BASE_NUMERICS_UNLIKELY((!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y)) &&
+ x && y))
+ {
+ return false;
+ }
+
+ Promotion presult = {};
+ bool is_valid = true;
+ if constexpr (CheckedMulFastOp<Promotion, Promotion>::is_supported)
+ {
+ // The fast op may be available with the promoted type.
+ is_valid = CheckedMulFastOp<Promotion, Promotion>::Do(x, y, &presult);
+ }
+ else if (IsIntegerArithmeticSafe<Promotion, T, U>::value)
+ {
+ presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
+ }
+ else
+ {
+ is_valid =
+ CheckedMulImpl(static_cast<Promotion>(x), static_cast<Promotion>(y), &presult);
+ }
+ if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
+ return false;
+ *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+// Division just requires a check for a zero denominator or an invalid negation
+// on signed min/-1.
+template <typename T, typename U, class Enable = void>
+struct CheckedDivOp
+{};
+
+template <typename T, typename U>
+struct CheckedDivOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ if (BASE_NUMERICS_UNLIKELY(!y))
+ return false;
+
+ // The overflow check can be compiled away if we don't have the exact
+ // combination of types needed to trigger this case.
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ if (BASE_NUMERICS_UNLIKELY(
+ (std::is_signed<T>::value && std::is_signed<U>::value &&
+ IsTypeInRangeForNumericType<T, Promotion>::value &&
+ static_cast<Promotion>(x) == std::numeric_limits<Promotion>::lowest() &&
+ y == static_cast<U>(-1))))
+ {
+ return false;
+ }
+
+ // This branch always compiles away if the above branch wasn't removed.
+ if (BASE_NUMERICS_UNLIKELY((!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y)) &&
+ x))
+ {
+ return false;
+ }
+
+ const Promotion presult = Promotion(x) / Promotion(y);
+ if (!IsValueInRangeForNumericType<V>(presult))
+ return false;
+ *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedModOp
+{};
+
+template <typename T, typename U>
+struct CheckedModOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ if (BASE_NUMERICS_UNLIKELY(!y))
+ return false;
+
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ if (BASE_NUMERICS_UNLIKELY(
+ (std::is_signed<T>::value && std::is_signed<U>::value &&
+ IsTypeInRangeForNumericType<T, Promotion>::value &&
+ static_cast<Promotion>(x) == std::numeric_limits<Promotion>::lowest() &&
+ y == static_cast<U>(-1))))
+ {
+ *result = 0;
+ return true;
+ }
+
+ const Promotion presult = static_cast<Promotion>(x) % static_cast<Promotion>(y);
+ if (!IsValueInRangeForNumericType<V>(presult))
+ return false;
+        *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedLshOp
+{};
+
+// Left shift. Shifts less than 0 or greater than or equal to the number
+// of bits in the promoted type are undefined. Shifts of negative values
+// are undefined. Otherwise it is defined when the result fits.
+template <typename T, typename U>
+struct CheckedLshOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = T;
+ template <typename V>
+ static constexpr bool Do(T x, U shift, V *result)
+ {
+ // Disallow negative numbers and verify the shift is in bounds.
+ if (BASE_NUMERICS_LIKELY(!IsValueNegative(x) &&
+ as_unsigned(shift) < as_unsigned(std::numeric_limits<T>::digits)))
+ {
+ // Shift as unsigned to avoid undefined behavior.
+ *result = static_cast<V>(as_unsigned(x) << shift);
+ // If the shift can be reversed, we know it was valid.
+ return *result >> shift == x;
+ }
+
+ // Handle the legal corner-case of a full-width signed shift of zero.
+ if (!std::is_signed<T>::value || x ||
+ as_unsigned(shift) != as_unsigned(std::numeric_limits<T>::digits))
+ return false;
+ *result = 0;
+ return true;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedRshOp
+{};
+
+// Right shift. Shifts less than 0 or greater than or equal to the number
+// of bits in the promoted type are undefined. Otherwise, it is always defined,
+// but a right shift of a negative value is implementation-dependent.
+template <typename T, typename U>
+struct CheckedRshOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = T;
+ template <typename V>
+ static bool Do(T x, U shift, V *result)
+ {
+ // Use sign conversion to push negative values out of range.
+ if (BASE_NUMERICS_UNLIKELY(as_unsigned(shift) >= IntegerBitsPlusSign<T>::value))
+ {
+ return false;
+ }
+
+ const T tmp = x >> shift;
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAndOp
+{};
+
+// For simplicity we support only unsigned integer results.
+template <typename T, typename U>
+struct CheckedAndOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ const result_type tmp = static_cast<result_type>(x) & static_cast<result_type>(y);
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedOrOp
+{};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedOrOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ const result_type tmp = static_cast<result_type>(x) | static_cast<result_type>(y);
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedXorOp
+{};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedXorOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ const result_type tmp = static_cast<result_type>(x) ^ static_cast<result_type>(y);
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+// Max doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMaxOp
+{};
+
+template <typename T, typename U>
+struct CheckedMaxOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ const result_type tmp =
+ IsGreater<T, U>::Test(x, y) ? static_cast<result_type>(x) : static_cast<result_type>(y);
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+// Min doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMinOp
+{};
+
+template <typename T, typename U>
+struct CheckedMinOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<U>::value>::type>
+{
+ using result_type = typename LowestValuePromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ const result_type tmp =
+ IsLess<T, U>::Test(x, y) ? static_cast<result_type>(x) : static_cast<result_type>(y);
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
+ template <typename T, typename U> \
+ struct Checked##NAME##Op<T, U, \
+ typename std::enable_if<std::is_floating_point<T>::value || \
+ std::is_floating_point<U>::value>::type> \
+ { \
+ using result_type = typename MaxExponentPromotion<T, U>::type; \
+ template <typename V> \
+ static constexpr bool Do(T x, U y, V *result) \
+ { \
+ using Promotion = typename MaxExponentPromotion<T, U>::type; \
+ const Promotion presult = x OP y; \
+ if (!IsValueInRangeForNumericType<V>(presult)) \
+ return false; \
+ *result = static_cast<V>(presult); \
+ return true; \
+ } \
+ };
+
+BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef BASE_FLOAT_ARITHMETIC_OPS
+
+// Floats carry around their validity state with them, but integers do not. So,
+// we wrap the underlying value in a specialization in order to hide that detail
+// and expose an interface via accessors.
+enum NumericRepresentation
+{
+ NUMERIC_INTEGER,
+ NUMERIC_FLOATING,
+ NUMERIC_UNKNOWN
+};
+
+template <typename NumericType>
+struct GetNumericRepresentation
+{
+ static const NumericRepresentation value =
+ std::is_integral<NumericType>::value
+ ? NUMERIC_INTEGER
+ : (std::is_floating_point<NumericType>::value ? NUMERIC_FLOATING : NUMERIC_UNKNOWN);
+};
+
+template <typename T, NumericRepresentation type = GetNumericRepresentation<T>::value>
+class CheckedNumericState
+{};
+
+// Integrals require quite a bit of additional housekeeping to manage state.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_INTEGER>
+{
+ public:
+ template <typename Src = int>
+ constexpr explicit CheckedNumericState(Src value = 0, bool is_valid = true)
+ : is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
+ value_(WellDefinedConversionOrZero(value, is_valid_))
+ {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ }
+
+ template <typename Src>
+ constexpr CheckedNumericState(const CheckedNumericState<Src> &rhs)
+ : CheckedNumericState(rhs.value(), rhs.is_valid())
+ {}
+
+ constexpr bool is_valid() const { return is_valid_; }
+
+ constexpr T value() const { return value_; }
+
+ private:
+ // Ensures that a type conversion does not trigger undefined behavior.
+ template <typename Src>
+ static constexpr T WellDefinedConversionOrZero(Src value, bool is_valid)
+ {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return (std::is_integral<SrcType>::value || is_valid) ? static_cast<T>(value) : 0;
+ }
+
+    // is_valid_ precedes value_ because member initializers in the constructors
+ // are evaluated in field order, and is_valid_ must be read when initializing
+ // value_.
+ bool is_valid_;
+ T value_;
+};
+
+// Floating points maintain their own validity, but need translation wrappers.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_FLOATING>
+{
+ public:
+ template <typename Src = double>
+ constexpr explicit CheckedNumericState(Src value = 0.0, bool is_valid = true)
+ : value_(
+ WellDefinedConversionOrNaN(value, is_valid && IsValueInRangeForNumericType<T>(value)))
+ {}
+
+ template <typename Src>
+ constexpr CheckedNumericState(const CheckedNumericState<Src> &rhs)
+ : CheckedNumericState(rhs.value(), rhs.is_valid())
+ {}
+
+ constexpr bool is_valid() const
+ {
+ // Written this way because std::isfinite is not reliably constexpr.
+ return MustTreatAsConstexpr(value_) ? value_ <= std::numeric_limits<T>::max() &&
+ value_ >= std::numeric_limits<T>::lowest()
+ : std::isfinite(value_);
+ }
+
+ constexpr T value() const { return value_; }
+
+ private:
+ // Ensures that a type conversion does not trigger undefined behavior.
+ template <typename Src>
+ static constexpr T WellDefinedConversionOrNaN(Src value, bool is_valid)
+ {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return (StaticDstRangeRelationToSrcRange<T, SrcType>::value == NUMERIC_RANGE_CONTAINED ||
+ is_valid)
+ ? static_cast<T>(value)
+ : std::numeric_limits<T>::quiet_NaN();
+ }
+
+ T value_;
+};
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_CHECKED_MATH_IMPL_H_
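
The sign-bit test that CheckedAddImpl above relies on is easier to see in isolation. Here is a standalone, hand-rolled int32_t variant (illustrative, not code from the patch): the sum is computed in unsigned arithmetic, where wraparound is well defined, and overflow is flagged when the result's sign agrees with neither operand:

    #include <cstdint>

    bool SafeAdd32(int32_t x, int32_t y, int32_t *result)
    {
        const uint32_t ux   = static_cast<uint32_t>(x);
        const uint32_t uy   = static_cast<uint32_t>(y);
        const uint32_t usum = ux + uy;
        // (usum ^ ux) & (usum ^ uy) has its sign bit set only when usum's
        // sign differs from both x and y, which is exactly signed overflow.
        if (static_cast<int32_t>((usum ^ ux) & (usum ^ uy)) < 0)
            return false;
        *result = static_cast<int32_t>(usum);
        return true;
    }
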
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math.h
new file mode 100644
index 0000000000..33d2e4b233
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math.h
@@ -0,0 +1,270 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CLAMPED_MATH_H_
+#define BASE_NUMERICS_CLAMPED_MATH_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/clamped_math_impl.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+template <typename T>
+class ClampedNumeric
+{
+ static_assert(std::is_arithmetic<T>::value, "ClampedNumeric<T>: T must be a numeric type.");
+
+ public:
+ using type = T;
+
+ constexpr ClampedNumeric() : value_(0) {}
+
+ // Copy constructor.
+ template <typename Src>
+ constexpr ClampedNumeric(const ClampedNumeric<Src> &rhs) : value_(saturated_cast<T>(rhs.value_))
+ {}
+
+ template <typename Src>
+ friend class ClampedNumeric;
+
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to ClampedNumerics to make them easier to use.
+ template <typename Src>
+ constexpr ClampedNumeric(Src value) // NOLINT(runtime/explicit)
+ : value_(saturated_cast<T>(value))
+ {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ }
+
+ // This is not an explicit constructor because we want a seamless conversion
+ // from StrictNumeric types.
+ template <typename Src>
+ constexpr ClampedNumeric(StrictNumeric<Src> value) // NOLINT(runtime/explicit)
+ : value_(saturated_cast<T>(static_cast<Src>(value)))
+ {}
+
+ // Returns a ClampedNumeric of the specified type, cast from the current
+ // ClampedNumeric, and saturated to the destination type.
+ template <typename Dst>
+ constexpr ClampedNumeric<typename UnderlyingType<Dst>::type> Cast() const
+ {
+ return *this;
+ }
+
+ // Prototypes for the supported arithmetic operator overloads.
+ template <typename Src>
+ constexpr ClampedNumeric &operator+=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator-=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator*=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator/=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator%=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator<<=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator>>=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator&=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator|=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator^=(const Src rhs);
+
+ constexpr ClampedNumeric operator-() const
+ {
+ // The negation of two's complement int min is int min, so that's the
+ // only overflow case where we will saturate.
+ return ClampedNumeric<T>(SaturatedNegWrapper(value_));
+ }
+
+ constexpr ClampedNumeric operator~() const
+ {
+ return ClampedNumeric<decltype(InvertWrapper(T()))>(InvertWrapper(value_));
+ }
+
+ constexpr ClampedNumeric Abs() const
+ {
+ // The negation of two's complement int min is int min, so that's the
+ // only overflow case where we will saturate.
+ return ClampedNumeric<T>(SaturatedAbsWrapper(value_));
+ }
+
+ template <typename U>
+ constexpr ClampedNumeric<typename MathWrapper<ClampedMaxOp, T, U>::type> Max(const U rhs) const
+ {
+ using result_type = typename MathWrapper<ClampedMaxOp, T, U>::type;
+ return ClampedNumeric<result_type>(ClampedMaxOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
+ }
+
+ template <typename U>
+ constexpr ClampedNumeric<typename MathWrapper<ClampedMinOp, T, U>::type> Min(const U rhs) const
+ {
+ using result_type = typename MathWrapper<ClampedMinOp, T, U>::type;
+ return ClampedNumeric<result_type>(ClampedMinOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
+ }
+
+ // This function is available only for integral types. It returns an unsigned
+ // integer of the same width as the source type, containing the absolute value
+ // of the source, and properly handling signed min.
+ constexpr ClampedNumeric<typename UnsignedOrFloatForSize<T>::type> UnsignedAbs() const
+ {
+ return ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>(SafeUnsignedAbs(value_));
+ }
+
+ constexpr ClampedNumeric &operator++()
+ {
+ *this += 1;
+ return *this;
+ }
+
+ constexpr ClampedNumeric operator++(int)
+ {
+ ClampedNumeric value = *this;
+ *this += 1;
+ return value;
+ }
+
+ constexpr ClampedNumeric &operator--()
+ {
+ *this -= 1;
+ return *this;
+ }
+
+ constexpr ClampedNumeric operator--(int)
+ {
+ ClampedNumeric value = *this;
+ *this -= 1;
+ return value;
+ }
+
+ // These perform the actual math operations on the ClampedNumerics.
+ // Binary arithmetic operations.
+ template <template <typename, typename, typename> class M, typename L, typename R>
+ static constexpr ClampedNumeric MathOp(const L lhs, const R rhs)
+ {
+ using Math = typename MathWrapper<M, L, R>::math;
+ return ClampedNumeric<T>(
+ Math::template Do<T>(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs)));
+ }
+
+ // Assignment arithmetic operations.
+ template <template <typename, typename, typename> class M, typename R>
+ constexpr ClampedNumeric &MathOp(const R rhs)
+ {
+ using Math = typename MathWrapper<M, T, R>::math;
+ *this = ClampedNumeric<T>(Math::template Do<T>(value_, Wrapper<R>::value(rhs)));
+ return *this;
+ }
+
+ template <typename Dst>
+ constexpr operator Dst() const
+ {
+ return saturated_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
+ }
+
+ // This method extracts the raw integer value without saturating it to the
+ // destination type as the conversion operator does. This is useful when
+ // e.g. assigning to an auto type or passing as a deduced template parameter.
+ constexpr T RawValue() const { return value_; }
+
+ private:
+ T value_;
+
+ // These wrappers allow us to handle state the same way for both
+ // ClampedNumeric and POD arithmetic types.
+ template <typename Src>
+ struct Wrapper
+ {
+ static constexpr Src value(Src value)
+ {
+ return static_cast<typename UnderlyingType<Src>::type>(value);
+ }
+ };
+};
+
+// Convenience wrapper to return a new ClampedNumeric from the provided
+// arithmetic or ClampedNumeric type.
+template <typename T>
+constexpr ClampedNumeric<typename UnderlyingType<T>::type> MakeClampedNum(const T value)
+{
+ return value;
+}
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream &operator<<(std::ostream &os, const ClampedNumeric<T> &value)
+{
+ os << static_cast<T>(value);
+ return os;
+}
+#endif
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M, typename L, typename R>
+constexpr ClampedNumeric<typename MathWrapper<M, L, R>::type> ClampMathOp(const L lhs, const R rhs)
+{
+ using Math = typename MathWrapper<M, L, R>::math;
+ return ClampedNumeric<typename Math::result_type>::template MathOp<M>(lhs, rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+template <template <typename, typename, typename> class M, typename L, typename R, typename... Args>
+constexpr auto ClampMathOp(const L lhs, const R rhs, const Args... args)
+{
+ return ClampMathOp<M>(ClampMathOp<M>(lhs, rhs), args...);
+}
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Add, +, +=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Sub, -, -=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mul, *, *=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Div, /, /=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mod, %, %=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Lsh, <<, <<=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Rsh, >>, >>=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, And, &, &=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Or, |, |=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Xor, ^, ^=)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Max)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Min)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLess, <)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLessOrEqual, <=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreater, >)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreaterOrEqual, >=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsEqual, ==)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsNotEqual, !=)
+
+} // namespace internal
+
+using internal::ClampAdd;
+using internal::ClampAnd;
+using internal::ClampDiv;
+using internal::ClampedNumeric;
+using internal::ClampLsh;
+using internal::ClampMax;
+using internal::ClampMin;
+using internal::ClampMod;
+using internal::ClampMul;
+using internal::ClampOr;
+using internal::ClampRsh;
+using internal::ClampSub;
+using internal::ClampXor;
+using internal::MakeClampedNum;
+
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_CLAMPED_MATH_H_
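A minimal usage sketch of the header above (illustrative only, not part of the
patch; it assumes the anglebase include paths used throughout this import):

    #include <cstdint>

    #include "anglebase/numerics/clamped_math.h"

    void ClampedDemo()
    {
        // Arithmetic saturates instead of wrapping or overflowing.
        angle::base::ClampedNumeric<int8_t> v = 100;
        v += 100;        // Saturates to 127 (INT8_MAX).
        int8_t out = v;  // The implicit conversion saturates as well.

        // The free-function form returns a ClampedNumeric that saturates
        // on overflow in the promoted arithmetic type.
        int32_t sum = angle::base::ClampAdd(INT32_MAX, 1);  // == INT32_MAX
        (void)out;
        (void)sum;
    }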
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math_impl.h
new file mode 100644
index 0000000000..198723eeaa
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math_impl.h
@@ -0,0 +1,368 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
+#define BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/checked_math.h"
+#include "anglebase/numerics/safe_conversions.h"
+#include "anglebase/numerics/safe_math_shared_impl.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value>::type * =
+ nullptr>
+constexpr T SaturatedNegWrapper(T value)
+{
+ return MustTreatAsConstexpr(value) || !ClampedNegFastOp<T>::is_supported
+ ? (NegateWrapper(value) != std::numeric_limits<T>::lowest()
+ ? NegateWrapper(value)
+ : std::numeric_limits<T>::max())
+ : ClampedNegFastOp<T>::Do(value);
+}
+
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value>::type * =
+ nullptr>
+constexpr T SaturatedNegWrapper(T value)
+{
+ return T(0);
+}
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value>::type * = nullptr>
+constexpr T SaturatedNegWrapper(T value)
+{
+ return -value;
+}
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value>::type * = nullptr>
+constexpr T SaturatedAbsWrapper(T value)
+{
+ // The calculation below is a static identity for unsigned types, but for
+ // signed integer types it provides a non-branching, saturated absolute value.
+ // This works because SafeUnsignedAbs() returns an unsigned type, which can
+ // represent the absolute value of all negative numbers of an equal-width
+ // integer type. The call to IsValueNegative() then detects overflow in the
+ // special case of numeric_limits<T>::min(), by evaluating the bit pattern as
+ // a signed integer value. If it is the overflow case, we end up subtracting
+ // one from the unsigned result, thus saturating to numeric_limits<T>::max().
+ return static_cast<T>(SafeUnsignedAbs(value) - IsValueNegative<T>(SafeUnsignedAbs(value)));
+}
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value>::type * = nullptr>
+constexpr T SaturatedAbsWrapper(T value)
+{
+ return value < 0 ? -value : value;
+}
+
+template <typename T, typename U, class Enable = void>
+struct ClampedAddOp
+{};
+
+template <typename T, typename U>
+struct ClampedAddOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ if (ClampedAddFastOp<T, U>::is_supported)
+ return ClampedAddFastOp<T, U>::template Do<V>(x, y);
+
+ static_assert(
+ std::is_same<V, result_type>::value || IsTypeInRangeForNumericType<U, V>::value,
+ "The saturation result cannot be determined from the "
+ "provided types.");
+ const V saturated = CommonMaxOrMin<V>(IsValueNegative(y));
+ V result = {};
+ return BASE_NUMERICS_LIKELY((CheckedAddOp<T, U>::Do(x, y, &result))) ? result : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedSubOp
+{};
+
+template <typename T, typename U>
+struct ClampedSubOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+        if (ClampedSubFastOp<T, U>::is_supported)
+ return ClampedSubFastOp<T, U>::template Do<V>(x, y);
+
+ static_assert(
+ std::is_same<V, result_type>::value || IsTypeInRangeForNumericType<U, V>::value,
+ "The saturation result cannot be determined from the "
+ "provided types.");
+ const V saturated = CommonMaxOrMin<V>(!IsValueNegative(y));
+ V result = {};
+ return BASE_NUMERICS_LIKELY((CheckedSubOp<T, U>::Do(x, y, &result))) ? result : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMulOp
+{};
+
+template <typename T, typename U>
+struct ClampedMulOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+        if (ClampedMulFastOp<T, U>::is_supported)
+ return ClampedMulFastOp<T, U>::template Do<V>(x, y);
+
+ V result = {};
+ const V saturated = CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
+ return BASE_NUMERICS_LIKELY((CheckedMulOp<T, U>::Do(x, y, &result))) ? result : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedDivOp
+{};
+
+template <typename T, typename U>
+struct ClampedDivOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ V result = {};
+ if (BASE_NUMERICS_LIKELY((CheckedDivOp<T, U>::Do(x, y, &result))))
+ return result;
+ // Saturation goes to max, min, or NaN (if x is zero).
+ return x ? CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y))
+ : SaturationDefaultLimits<V>::NaN();
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedModOp
+{};
+
+template <typename T, typename U>
+struct ClampedModOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ V result = {};
+ return BASE_NUMERICS_LIKELY((CheckedModOp<T, U>::Do(x, y, &result))) ? result : x;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedLshOp
+{};
+
+// Left shift. Non-zero values saturate in the direction of the sign. A zero
+// shifted by any value always results in zero.
+template <typename T, typename U>
+struct ClampedLshOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = T;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U shift)
+ {
+ static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+ if (BASE_NUMERICS_LIKELY(shift < std::numeric_limits<T>::digits))
+ {
+ // Shift as unsigned to avoid undefined behavior.
+ V result = static_cast<V>(as_unsigned(x) << shift);
+ // If the shift can be reversed, we know it was valid.
+ if (BASE_NUMERICS_LIKELY(result >> shift == x))
+ return result;
+ }
+ return x ? CommonMaxOrMin<V>(IsValueNegative(x)) : 0;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedRshOp
+{};
+
+// Right shift. For too-large shift amounts, negative values saturate to -1
+// and positive or zero values saturate to 0.
+template <typename T, typename U>
+struct ClampedRshOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = T;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U shift)
+ {
+ static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+ // Signed right shift is odd, because it saturates to -1 or 0.
+ const V saturated = as_unsigned(V(0)) - IsValueNegative(x);
+ return BASE_NUMERICS_LIKELY(shift < IntegerBitsPlusSign<T>::value)
+ ? saturated_cast<V>(x >> shift)
+ : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedAndOp
+{};
+
+template <typename T, typename U>
+struct ClampedAndOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr V Do(T x, U y)
+ {
+ return static_cast<result_type>(x) & static_cast<result_type>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedOrOp
+{};
+
+// For simplicity we promote to unsigned integers.
+template <typename T, typename U>
+struct ClampedOrOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr V Do(T x, U y)
+ {
+ return static_cast<result_type>(x) | static_cast<result_type>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedXorOp
+{};
+
+// As above, we promote to unsigned integers for simplicity.
+template <typename T, typename U>
+struct ClampedXorOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr V Do(T x, U y)
+ {
+ return static_cast<result_type>(x) ^ static_cast<result_type>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMaxOp
+{};
+
+template <typename T, typename U>
+struct ClampedMaxOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ return IsGreater<T, U>::Test(x, y) ? saturated_cast<V>(x) : saturated_cast<V>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMinOp
+{};
+
+template <typename T, typename U>
+struct ClampedMinOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<U>::value>::type>
+{
+ using result_type = typename LowestValuePromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ return IsLess<T, U>::Test(x, y) ? saturated_cast<V>(x) : saturated_cast<V>(y);
+ }
+};
+
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
+ template <typename T, typename U> \
+ struct Clamped##NAME##Op<T, U, \
+ typename std::enable_if<std::is_floating_point<T>::value || \
+ std::is_floating_point<U>::value>::type> \
+ { \
+ using result_type = typename MaxExponentPromotion<T, U>::type; \
+ template <typename V = result_type> \
+ static constexpr V Do(T x, U y) \
+ { \
+ return saturated_cast<V>(x OP y); \
+ } \
+ };
+
+BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef BASE_FLOAT_ARITHMETIC_OPS
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
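The saturation rules documented above can be exercised directly; a small
illustrative sketch of the edge cases (same include-path assumptions as the
previous example):

    #include <cstdint>

    #include "anglebase/numerics/clamped_math.h"

    void ClampedEdgeCases()
    {
        using angle::base::ClampedNumeric;

        // Abs() of the most negative value saturates to max instead of
        // overflowing: two's complement -INT32_MIN is not representable.
        int32_t a = ClampedNumeric<int32_t>(INT32_MIN).Abs();  // == INT32_MAX

        // Left shift: non-zero values saturate in the direction of the sign.
        int32_t b = ClampedNumeric<int32_t>(1) << 40u;  // == INT32_MAX

        // Right shift: an oversized shift saturates to -1 for negative
        // values and to 0 otherwise.
        int32_t c = ClampedNumeric<int32_t>(-1) >> 40u;  // == -1
        (void)a;
        (void)b;
        (void)c;
    }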
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/math_constants.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/math_constants.h
new file mode 100644
index 0000000000..385ce3970f
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/math_constants.h
@@ -0,0 +1,20 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_MATH_CONSTANTS_H_
+#define BASE_NUMERICS_MATH_CONSTANTS_H_
+
+namespace angle
+{
+namespace base
+{
+
+constexpr double kPiDouble = 3.14159265358979323846;
+constexpr float kPiFloat = 3.14159265358979323846f;
+
+// The mean acceleration due to gravity on Earth in m/s^2.
+constexpr double kMeanGravityDouble = 9.80665;
+constexpr float kMeanGravityFloat = 9.80665f;
+
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_MATH_CONSTANTS_H_
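These constants are plain constexpr values; a trivial illustrative use,
assuming the angle::base namespace wrapping used elsewhere in this import:

    #include "anglebase/numerics/math_constants.h"

    // Convert degrees to radians using the shared pi constant.
    constexpr double DegToRad(double degrees)
    {
        return degrees * angle::base::kPiDouble / 180.0;
    }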
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/ranges.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/ranges.h
new file mode 100644
index 0000000000..55a5a295af
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/ranges.h
@@ -0,0 +1,39 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_RANGES_H_
+#define BASE_NUMERICS_RANGES_H_
+
+#include <algorithm>
+#include <cmath>
+#include <type_traits>
+
+namespace angle
+{
+namespace base
+{
+
+// DO NOT USE THIS FUNCTION. IT IS DUE TO BE REMOVED. https://crbug.com/1231569
+// Please use base::clamp() from base/cxx17_backports.h instead.
+//
+// This function, unlike base::clamp(), does not check if `min` is greater than
+// `max`, and returns a bogus answer if it is. Please migrate all code that
+// calls this function to use base::clamp() instead.
+//
+// If, for some reason, the broken behavior is required, please re-create this
+// min/max nesting inline in the host code and explain with a comment why it
+// is needed.
+template <class T>
+constexpr const T &BrokenClampThatShouldNotBeUsed(const T &value, const T &min, const T &max)
+{
+ return std::min(std::max(value, min), max);
+}
+
+template <typename T>
+constexpr bool IsApproximatelyEqual(T lhs, T rhs, T tolerance)
+{
+ static_assert(std::is_arithmetic<T>::value, "Argument must be arithmetic");
+ return std::abs(rhs - lhs) <= tolerance;
+}
+
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_RANGES_H_
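To make the warning above concrete: with min greater than max, the min/max
nesting first raises the value to min and then cuts it down to max, silently
returning max. A short illustrative sketch (namespace as fixed above):

    #include "anglebase/numerics/ranges.h"

    void RangesDemo()
    {
        // min (10) is greater than max (0): the value is raised to 10 and
        // then cut down to 0, so the "clamp" returns 0 without complaint.
        int bogus = angle::base::BrokenClampThatShouldNotBeUsed(5, 10, 0);

        // Tolerance-based floating-point comparison.
        bool close = angle::base::IsApproximatelyEqual(0.1 + 0.2, 0.3, 1e-9);
        (void)bogus;
        (void)close;
    }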
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions.h
new file mode 100644
index 0000000000..3aa80c7ed0
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions.h
@@ -0,0 +1,403 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <cmath>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions_impl.h"
+
+#if defined(__ARMEL__) && !defined(__native_client__)
+# include "anglebase/numerics/safe_conversions_arm_impl.h"
+# define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (1)
+#else
+# define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (0)
+#endif
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+# include <ostream>
+#endif
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+#if !BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp
+{
+ static constexpr bool is_supported = false;
+ static constexpr Dst Do(Src)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<Dst>();
+ }
+};
+#endif // BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+#undef BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+
+// The following templates special-case a few specific integer conversions
+// where we can eke out better performance than generic range checking.
+template <typename Dst, typename Src, typename Enable = void>
+struct IsValueInRangeFastOp
+{
+ static constexpr bool is_supported = false;
+ static constexpr bool Do(Src value)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+// Signed to signed range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+ std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value>::type>
+{
+ static constexpr bool is_supported = true;
+
+ static constexpr bool Do(Src value)
+ {
+ // Just downcast to the smaller type, sign extend it back to the original
+ // type, and then see if it matches the original value.
+ return value == static_cast<Dst>(value);
+ }
+};
+
+// Signed to unsigned range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+ !std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value>::type>
+{
+ static constexpr bool is_supported = true;
+
+ static constexpr bool Do(Src value)
+ {
+        // Casting the signed value to unsigned wraps negative values to the
+        // top of the range; comparing against the smaller of the two maximums
+        // then serves as the upper bound.
+ return as_unsigned(value) <= as_unsigned(CommonMax<Src, Dst>());
+ }
+};
+
+// Convenience function that returns true if the supplied value is in range
+// for the destination type.
+template <typename Dst, typename Src>
+constexpr bool IsValueInRangeForNumericType(Src value)
+{
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return internal::IsValueInRangeFastOp<Dst, SrcType>::is_supported
+ ? internal::IsValueInRangeFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
+ : internal::DstRangeRelationToSrcRange<Dst>(static_cast<SrcType>(value)).IsValid();
+}
+
+// checked_cast<> is analogous to static_cast<> for numeric types,
+// except that it CHECKs that the specified numeric conversion will not
+// overflow or underflow. NaN source will always trigger a CHECK.
+template <typename Dst, class CheckHandler = internal::CheckOnFailure, typename Src>
+constexpr Dst checked_cast(Src value)
+{
+ // This throws a compile-time error on evaluating the constexpr if it can be
+ // determined at compile-time as failing, otherwise it will CHECK at runtime.
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return BASE_NUMERICS_LIKELY((IsValueInRangeForNumericType<Dst>(value)))
+ ? static_cast<Dst>(static_cast<SrcType>(value))
+ : CheckHandler::template HandleFailure<Dst>();
+}
+
+// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
+// You may provide your own limits (e.g. to saturated_cast) so long as you
+// implement all of the static constexpr member functions in the class below.
+template <typename T>
+struct SaturationDefaultLimits : public std::numeric_limits<T>
+{
+ static constexpr T NaN()
+ {
+ return std::numeric_limits<T>::has_quiet_NaN ? std::numeric_limits<T>::quiet_NaN() : T();
+ }
+ using std::numeric_limits<T>::max;
+ static constexpr T Overflow()
+ {
+ return std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity()
+ : std::numeric_limits<T>::max();
+ }
+ using std::numeric_limits<T>::lowest;
+ static constexpr T Underflow()
+ {
+ return std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity() * -1
+ : std::numeric_limits<T>::lowest();
+ }
+};
+
+template <typename Dst, template <typename> class S, typename Src>
+constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint)
+{
+ // For some reason clang generates much better code when the branch is
+ // structured exactly this way, rather than a sequence of checks.
+ return !constraint.IsOverflowFlagSet()
+ ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value) : S<Dst>::Underflow())
+ // Skip this check for integral Src, which cannot be NaN.
+ : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
+ ? S<Dst>::Overflow()
+ : S<Dst>::NaN());
+}
+
+// We can reduce the number of conditions and get slightly better performance
+// for normal signed and unsigned integer ranges. And in the specific case of
+// Arm, we can use the optimized saturation instructions.
+template <typename Dst, typename Src, typename Enable = void>
+struct SaturateFastOp
+{
+ static constexpr bool is_supported = false;
+ static constexpr Dst Do(Src value)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<Dst>();
+ }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<std::is_integral<Src>::value && std::is_integral<Dst>::value &&
+ SaturateFastAsmOp<Dst, Src>::is_supported>::type>
+{
+ static constexpr bool is_supported = true;
+ static constexpr Dst Do(Src value) { return SaturateFastAsmOp<Dst, Src>::Do(value); }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<std::is_integral<Src>::value && std::is_integral<Dst>::value &&
+ !SaturateFastAsmOp<Dst, Src>::is_supported>::type>
+{
+ static constexpr bool is_supported = true;
+ static constexpr Dst Do(Src value)
+ {
+ // The exact order of the following is structured to hit the correct
+ // optimization heuristics across compilers. Do not change without
+ // checking the emitted code.
+ const Dst saturated = CommonMaxOrMin<Dst, Src>(
+ IsMaxInRangeForNumericType<Dst, Src>() ||
+ (!IsMinInRangeForNumericType<Dst, Src>() && IsValueNegative(value)));
+ return BASE_NUMERICS_LIKELY(IsValueInRangeForNumericType<Dst>(value))
+ ? static_cast<Dst>(value)
+ : saturated;
+ }
+};
+
+// saturated_cast<> is analogous to static_cast<> for numeric types, except
+// that the specified numeric conversion will saturate by default rather than
+// overflow or underflow, and NaN assignment to an integral will return 0.
+// All boundary condition behaviors can be overridden with a custom handler.
+template <typename Dst,
+ template <typename> class SaturationHandler = SaturationDefaultLimits,
+ typename Src>
+constexpr Dst saturated_cast(Src value)
+{
+ using SrcType = typename UnderlyingType<Src>::type;
+ return !IsCompileTimeConstant(value) && SaturateFastOp<Dst, SrcType>::is_supported &&
+ std::is_same<SaturationHandler<Dst>, SaturationDefaultLimits<Dst>>::value
+ ? SaturateFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
+ : saturated_cast_impl<Dst, SaturationHandler, SrcType>(
+ static_cast<SrcType>(value),
+ DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(
+ static_cast<SrcType>(value)));
+}
+
+// strict_cast<> is analogous to static_cast<> for numeric types, except that
+// it will cause a compile failure if the destination type is not large enough
+// to contain any value in the source type. It performs no runtime checking.
+template <typename Dst, typename Src>
+constexpr Dst strict_cast(Src value)
+{
+ using SrcType = typename UnderlyingType<Src>::type;
+ static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // Alternatively, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value == NUMERIC_RANGE_CONTAINED,
+ "The source type is out of range for the destination type. "
+ "Please see strict_cast<> comments for more information.");
+
+ return static_cast<Dst>(static_cast<SrcType>(value));
+}
+
+// Some wrappers to statically check that a type is in range.
+template <typename Dst, typename Src, class Enable = void>
+struct IsNumericRangeContained
+{
+ static constexpr bool value = false;
+};
+
+template <typename Dst, typename Src>
+struct IsNumericRangeContained<
+ Dst,
+ Src,
+ typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
+ ArithmeticOrUnderlyingEnum<Src>::value>::type>
+{
+ static constexpr bool value =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value == NUMERIC_RANGE_CONTAINED;
+};
+
+// StrictNumeric implements compile time range checking between numeric types by
+// wrapping assignment operations in a strict_cast. This class is intended to be
+// used for function arguments and return types, to ensure the destination type
+// can always contain the source type. This is essentially the same as enforcing
+// -Wconversion in gcc and C4302 warnings on MSVC, but it can be applied
+// incrementally at API boundaries, making it easier to convert code so that it
+// compiles cleanly with truncation warnings enabled.
+// This template should introduce no runtime overhead, but it also provides no
+// runtime checking of any of the associated mathematical operations. Use
+// CheckedNumeric for runtime range checks of the actual value being assigned.
+template <typename T>
+class StrictNumeric
+{
+ public:
+ using type = T;
+
+ constexpr StrictNumeric() : value_(0) {}
+
+ // Copy constructor.
+ template <typename Src>
+ constexpr StrictNumeric(const StrictNumeric<Src> &rhs) : value_(strict_cast<T>(rhs.value_))
+ {}
+
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to StrictNumerics to make them easier to use.
+ template <typename Src>
+ constexpr StrictNumeric(Src value) // NOLINT(runtime/explicit)
+ : value_(strict_cast<T>(value))
+ {}
+
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // If you're assigning from a CheckedNumeric<> class, you may be able to use
+ // the AssignIfValid() member function, specify a narrower destination type to
+ // the member value functions (e.g. val.template ValueOrDie<Dst>()), use one
+ // of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
+ // If you've encountered an _ambiguous overload_ you can use a static_cast<>
+ // to explicitly cast the result to the destination type.
+ // If none of that works, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ template <typename Dst,
+ typename std::enable_if<IsNumericRangeContained<Dst, T>::value>::type * = nullptr>
+ constexpr operator Dst() const
+ {
+ return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
+ }
+
+ private:
+ const T value_;
+};
+
+// Convenience wrapper that returns a StrictNumeric from the provided
+// arithmetic type.
+template <typename T>
+constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(const T value)
+{
+ return value;
+}
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream &operator<<(std::ostream &os, const StrictNumeric<T> &value)
+{
+ os << static_cast<T>(value);
+ return os;
+}
+#endif
+
+#define BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP) \
+ template <typename L, typename R, \
+ typename std::enable_if<internal::Is##CLASS##Op<L, R>::value>::type * = nullptr> \
+ constexpr bool operator OP(const L lhs, const R rhs) \
+ { \
+ return SafeCompare<NAME, typename UnderlyingType<L>::type, \
+ typename UnderlyingType<R>::type>(lhs, rhs); \
+ }
+
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLess, <)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLessOrEqual, <=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreater, >)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreaterOrEqual, >=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsEqual, ==)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsNotEqual, !=)
+
+} // namespace internal
+
+using internal::as_signed;
+using internal::as_unsigned;
+using internal::checked_cast;
+using internal::IsTypeInRangeForNumericType;
+using internal::IsValueInRangeForNumericType;
+using internal::IsValueNegative;
+using internal::MakeStrictNum;
+using internal::SafeUnsignedAbs;
+using internal::saturated_cast;
+using internal::strict_cast;
+using internal::StrictNumeric;
+
+// Explicitly make a shorter size_t alias for convenience.
+using SizeT = StrictNumeric<size_t>;
+
+// Saturating floating-point -> integral conversions, which can therefore
+// actually return an integral type. In most cases, these should be preferred
+// over the std:: versions, which return the floating-point type.
+template <
+ typename Dst = int,
+ typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value && std::is_floating_point<Src>::value>>
+Dst ClampFloor(Src value)
+{
+ return saturated_cast<Dst>(std::floor(value));
+}
+template <
+ typename Dst = int,
+ typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value && std::is_floating_point<Src>::value>>
+Dst ClampCeil(Src value)
+{
+ return saturated_cast<Dst>(std::ceil(value));
+}
+template <
+ typename Dst = int,
+ typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value && std::is_floating_point<Src>::value>>
+Dst ClampRound(Src value)
+{
+ const Src rounded = (value >= 0.0f) ? std::floor(value + 0.5f) : std::ceil(value - 0.5f);
+ return saturated_cast<Dst>(rounded);
+}
+
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_CONVERSIONS_H_
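Putting the cast flavors defined above side by side; an illustrative sketch
under the same include-path assumptions:

    #include <cstdint>

    #include "anglebase/numerics/safe_conversions.h"

    void ConversionDemo(int64_t big, float f)
    {
        using namespace angle::base;

        // checked_cast: CHECKs (traps) at runtime if the value doesn't fit.
        int32_t exact = checked_cast<int32_t>(int64_t{42});

        // saturated_cast: clamps to the destination range; NaN becomes 0.
        uint8_t clamped = saturated_cast<uint8_t>(big);

        // ClampRound/ClampFloor/ClampCeil: round first, then saturate.
        int32_t rounded = ClampRound<int32_t>(f);

        // strict_cast: compile-time-only check, zero runtime cost. This
        // compiles because int64_t contains the whole int32_t range.
        int64_t widened = strict_cast<int64_t>(exact);

        // StrictNumeric applies the same rule to assignments:
        StrictNumeric<int32_t> n(exact);
        int64_t ok = n;      // Compiles: range contained.
        // int16_t bad = n;  // Would fail to compile: range not contained.

        (void)clamped;
        (void)rounded;
        (void)widened;
        (void)ok;
    }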
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_arm_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_arm_impl.h
new file mode 100644
index 0000000000..74e5bcc0c6
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_arm_impl.h
@@ -0,0 +1,60 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions_impl.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+// Fast saturation to a destination type.
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp
+{
+ static constexpr bool is_supported =
+ std::is_signed<Src>::value && std::is_integral<Dst>::value &&
+ std::is_integral<Src>::value &&
+ IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
+ IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value;
+
+ __attribute__((always_inline)) static Dst Do(Src value)
+ {
+ int32_t src = value;
+ typename std::conditional<std::is_signed<Dst>::value, int32_t, uint32_t>::type result;
+ if (std::is_signed<Dst>::value)
+ {
+ asm("ssat %[dst], %[shift], %[src]"
+ : [dst] "=r"(result)
+ : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
+ ? IntegerBitsPlusSign<Dst>::value
+ : 32));
+ }
+ else
+ {
+ asm("usat %[dst], %[shift], %[src]"
+ : [dst] "=r"(result)
+ : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value < 32
+ ? IntegerBitsPlusSign<Dst>::value
+ : 31));
+ }
+ return static_cast<Dst>(result);
+ }
+};
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
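For readers unfamiliar with the ssat/usat instructions, this is roughly the
scalar behavior a single ssat encodes (an illustrative sketch, not the code
the patch emits on Arm):

    #include <cstdint>

    // ssat dst, #16, src: clamp a 32-bit value to the signed 16-bit range
    // [-32768, 32767] in one instruction, with no branches.
    int32_t SignedSaturate16(int32_t value)
    {
        if (value > INT16_MAX)
            return INT16_MAX;
        if (value < INT16_MIN)
            return INT16_MIN;
        return value;
    }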
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_impl.h
new file mode 100644
index 0000000000..7d0f031754
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_impl.h
@@ -0,0 +1,893 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+
+#include <stdint.h>
+
+#include <limits>
+#include <type_traits>
+
+#if defined(__GNUC__) || defined(__clang__)
+# define BASE_NUMERICS_LIKELY(x) __builtin_expect(!!(x), 1)
+# define BASE_NUMERICS_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+# define BASE_NUMERICS_LIKELY(x) (x)
+# define BASE_NUMERICS_UNLIKELY(x) (x)
+#endif
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+// The std library doesn't provide a binary max_exponent for integers;
+// however, we can compute an analog using std::numeric_limits<>::digits.
+template <typename NumericType>
+struct MaxExponent
+{
+ static const int value = std::is_floating_point<NumericType>::value
+ ? std::numeric_limits<NumericType>::max_exponent
+ : std::numeric_limits<NumericType>::digits + 1;
+};
+
+// The number of bits (including the sign) in an integer. Eliminates sizeof
+// hacks.
+template <typename NumericType>
+struct IntegerBitsPlusSign
+{
+ static const int value =
+ std::numeric_limits<NumericType>::digits + std::is_signed<NumericType>::value;
+};
+
+// Helper templates for integer manipulations.
+
+template <typename Integer>
+struct PositionOfSignBit
+{
+ static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
+};
+
+// Determines if a numeric value is negative without triggering compiler
+// warnings from the always-false comparison: unsigned(value) < 0.
+template <typename T, typename std::enable_if<std::is_signed<T>::value>::type * = nullptr>
+constexpr bool IsValueNegative(T value)
+{
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return value < 0;
+}
+
+template <typename T, typename std::enable_if<!std::is_signed<T>::value>::type * = nullptr>
+constexpr bool IsValueNegative(T)
+{
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return false;
+}
+
+// This performs a fast negation, returning a signed value. It works on unsigned
+// arguments, but probably doesn't do what you want for any unsigned value
+// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
+template <typename T>
+constexpr typename std::make_signed<T>::type ConditionalNegate(T x, bool is_negative)
+{
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using SignedT = typename std::make_signed<T>::type;
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return static_cast<SignedT>((static_cast<UnsignedT>(x) ^ -SignedT(is_negative)) + is_negative);
+}
+
+// This performs a safe, absolute value via unsigned overflow.
+template <typename T>
+constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value)
+{
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return IsValueNegative(value) ? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
+ : static_cast<UnsignedT>(value);
+}
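An illustrative sketch of these two helpers on the awkward signed-min case
(include path as elsewhere in this import):

    #include <cstdint>

    #include "anglebase/numerics/safe_conversions_impl.h"

    void AbsDemo()
    {
        using angle::base::internal::ConditionalNegate;
        using angle::base::internal::SafeUnsignedAbs;

        // Negating INT32_MIN overflows as a signed operation, but the
        // magnitude 2147483648 is representable unsigned, and the unsigned
        // subtraction it uses is fully defined.
        uint32_t mag = SafeUnsignedAbs(INT32_MIN);  // == 2147483648u

        // Branchless negate-if-flag; the result is always signed.
        int32_t neg = ConditionalNegate(5u, true);  // == -5
        (void)mag;
        (void)neg;
    }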
+
+// This allows us to switch paths on known compile-time constants.
+#if defined(__clang__) || defined(__GNUC__)
+constexpr bool CanDetectCompileTimeConstant()
+{
+ return true;
+}
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T v)
+{
+ return __builtin_constant_p(v);
+}
+#else
+constexpr bool CanDetectCompileTimeConstant()
+{
+ return false;
+}
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T)
+{
+ return false;
+}
+#endif
+template <typename T>
+constexpr bool MustTreatAsConstexpr(const T v)
+{
+ // Either we can't detect a compile-time constant, and must always use the
+ // constexpr path, or we know we have a compile-time constant.
+ return !CanDetectCompileTimeConstant() || IsCompileTimeConstant(v);
+}
+
+// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
+// Also used in a constexpr template to trigger a compilation failure on
+// an error condition.
+struct CheckOnFailure
+{
+ template <typename T>
+ static T HandleFailure()
+ {
+#if defined(_MSC_VER)
+ __debugbreak();
+#elif defined(__GNUC__) || defined(__clang__)
+ __builtin_trap();
+#else
+ ((void)(*(volatile char *)0 = 0));
+#endif
+ return T();
+ }
+};
+
+enum IntegerRepresentation
+{
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED
+};
+
+// A range for a given numeric Src type is contained for a given numeric Dst
+// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
+// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
+// We implement this as template specializations rather than simple static
+// comparisons to ensure type correctness in our comparisons.
+enum NumericRangeRepresentation
+{
+ NUMERIC_RANGE_NOT_CONTAINED,
+ NUMERIC_RANGE_CONTAINED
+};
+
+// Helper templates to statically determine if our destination type can contain
+// maximum and minimum values represented by the source type.
+
+template <
+ typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED>
+struct StaticDstRangeRelationToSrcRange;
+
+// Same sign: Dst is guaranteed to contain Src only if its range is equal or
+// larger.
+template <typename Dst, typename Src, IntegerRepresentation Sign>
+struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign>
+{
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value >= MaxExponent<Src>::value ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Unsigned to signed: Dst is guaranteed to contain source only if its range is
+// larger.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED>
+{
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value > MaxExponent<Src>::value ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Signed to unsigned: Dst cannot be statically determined to contain Src.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED>
+{
+ static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// This class wraps the range constraints as separate booleans so the compiler
+// can identify constants and eliminate unused code paths.
+class RangeCheck
+{
+ public:
+ constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
+ : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound)
+ {}
+ constexpr RangeCheck() : is_underflow_(0), is_overflow_(0) {}
+ constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
+ constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
+ constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
+ constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
+ constexpr bool operator==(const RangeCheck rhs) const
+ {
+ return is_underflow_ == rhs.is_underflow_ && is_overflow_ == rhs.is_overflow_;
+ }
+ constexpr bool operator!=(const RangeCheck rhs) const { return !(*this == rhs); }
+
+ private:
+ // Do not change the order of these member variables. The integral conversion
+ // optimization depends on this exact order.
+ const bool is_underflow_;
+ const bool is_overflow_;
+};
+
+// The following helper template addresses a corner case in range checks for
+// conversion from a floating-point type to an integral type of smaller range
+// but larger precision (e.g. float -> unsigned). The problem is as follows:
+// 1. Integral maximum is always one less than a power of two, so it must be
+// truncated to fit the mantissa of the floating point. The direction of
+// rounding is implementation defined, but by default it's always IEEE
+// floats, which round to nearest and thus result in a value of larger
+// magnitude than the integral value.
+// Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
+// // is 4294967295u.
+// 2. If the floating point value is equal to the promoted integral maximum
+// value, a range check will erroneously pass.
+// Example: (4294967296f <= 4294967295u) // This is true due to a precision
+// // loss in rounding up to float.
+// 3. When the floating point value is then converted to an integral, the
+// resulting value is out of range for the target integral type and
+// thus is implementation defined.
+//    Example: unsigned u = (float)UINT_MAX; // u will typically overflow to 0.
+// To fix this bug we manually truncate the maximum value when the destination
+// type is an integral of larger precision than the source floating-point type,
+// such that the resulting maximum is represented exactly as a floating point.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct NarrowingRange
+{
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = typename std::numeric_limits<Dst>;
+
+ // Computes the mask required to make an accurate comparison between types.
+ static const int kShift =
+ (MaxExponent<Src>::value > MaxExponent<Dst>::value && SrcLimits::digits < DstLimits::digits)
+ ? (DstLimits::digits - SrcLimits::digits)
+ : 0;
+    // Masks out the integer bits that are beyond the precision of the
+    // intermediate type used for comparison.
+    template <typename T, typename std::enable_if<std::is_integral<T>::value>::type * = nullptr>
+    static constexpr T Adjust(T value)
+ {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift < DstLimits::digits, "");
+ return static_cast<T>(ConditionalNegate(SafeUnsignedAbs(value) & ~((T(1) << kShift) - T(1)),
+ IsValueNegative(value)));
+ }
+
+ template <typename T,
+ typename std::enable_if<std::is_floating_point<T>::value>::type * = nullptr>
+ static constexpr T Adjust(T value)
+ {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift == 0, "");
+ return value;
+ }
+
+ static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
+ static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
+};
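Working the corner case above through with concrete numbers (illustrative
only):

    #include <cstdint>

    void FloatCornerCase()
    {
        // UINT32_MAX (4294967295) is not representable as a float; it rounds
        // up to 4294967296.0f, one past the integral maximum.
        float f = static_cast<float>(UINT32_MAX);

        // A naive range check then passes, because 4294967295u also promotes
        // to 4294967296.0f in the comparison below, yet converting f back to
        // uint32_t is out of range.
        bool naive_ok = (f <= UINT32_MAX);  // true, misleadingly.

        // NarrowingRange::Adjust() instead masks the bound down to the
        // largest float-exact value, 4294967040.0f (kShift == 32 - 24 == 8),
        // so the comparison against f correctly fails.
        (void)naive_ok;
    }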
+
+template <
+ typename Dst,
+ typename Src,
+ template <typename>
+ class Bounds,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ NumericRangeRepresentation DstRange = StaticDstRangeRelationToSrcRange<Dst, Src>::value>
+struct DstRangeRelationToSrcRangeImpl;
+
+// The following templates are for ranges that must be verified at runtime. We
+// split them into checks based on signedness to avoid confusing casts and
+// compiler warnings on signed and unsigned comparisons.
+
+// Same sign narrowing: The range is contained for normal limits.
+template <typename Dst,
+ typename Src,
+ template <typename>
+ class Bounds,
+ IntegerRepresentation DstSign,
+ IntegerRepresentation SrcSign>
+struct DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds, DstSign, SrcSign, NUMERIC_RANGE_CONTAINED>
+{
+ static constexpr RangeCheck Check(Src value)
+ {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
+ static_cast<Dst>(value) >= DstLimits::lowest(),
+ static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
+ static_cast<Dst>(value) <= DstLimits::max());
+ }
+};
+
+// Signed to signed narrowing: Both the upper and lower boundaries may be
+// exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ Bounds,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_SIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED>
+{
+ static constexpr RangeCheck Check(Src value)
+ {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
+ }
+};
+
+// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
+// standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ Bounds,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED>
+{
+ static constexpr RangeCheck Check(Src value)
+ {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
+ value <= DstLimits::max());
+ }
+};
+
+// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ Bounds,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED>
+{
+ static constexpr RangeCheck Check(Src value)
+ {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ return RangeCheck(
+ DstLimits::lowest() <= Dst(0) ||
+ static_cast<Promotion>(value) >= static_cast<Promotion>(DstLimits::lowest()),
+ static_cast<Promotion>(value) <= static_cast<Promotion>(DstLimits::max()));
+ }
+};
+
+// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
+// and any negative value exceeds the lower boundary for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ Bounds,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED>
+{
+ static constexpr RangeCheck Check(Src value)
+ {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ bool ge_zero = false;
+        // Converting floating-point to integer discards the fractional part,
+        // so values in (-1.0, -0.0) truncate to 0 and fit in Dst.
+ if (std::is_floating_point<Src>::value)
+ {
+ ge_zero = value > Src(-1);
+ }
+ else
+ {
+ ge_zero = value >= Src(0);
+ }
+ return RangeCheck(
+ ge_zero && (DstLimits::lowest() == 0 || static_cast<Dst>(value) >= DstLimits::lowest()),
+ static_cast<Promotion>(SrcLimits::max()) <= static_cast<Promotion>(DstLimits::max()) ||
+ static_cast<Promotion>(value) <= static_cast<Promotion>(DstLimits::max()));
+ }
+};
+
+// Simple wrapper for statically checking if a type's range is contained.
+template <typename Dst, typename Src>
+struct IsTypeInRangeForNumericType
+{
+ static const bool value =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value == NUMERIC_RANGE_CONTAINED;
+};
+
+template <typename Dst, template <typename> class Bounds = std::numeric_limits, typename Src>
+constexpr RangeCheck DstRangeRelationToSrcRange(Src value)
+{
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+ static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
+ return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
+}
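A small illustrative sketch of this runtime range check in action:

    #include <cstdint>

    #include "anglebase/numerics/safe_conversions_impl.h"

    void RangeCheckDemo()
    {
        using angle::base::internal::DstRangeRelationToSrcRange;
        using angle::base::internal::RangeCheck;

        // A negative int is below the uint8_t range...
        constexpr RangeCheck r1 = DstRangeRelationToSrcRange<uint8_t>(-1);
        static_assert(r1.IsUnderflow(), "-1 underflows uint8_t");

        // ...and 300 is above it.
        constexpr RangeCheck r2 = DstRangeRelationToSrcRange<uint8_t>(300);
        static_assert(r2.IsOverflow(), "300 overflows uint8_t");
    }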
+
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForDigitsAndSign;
+
+#define INTEGER_FOR_DIGITS_AND_SIGN(I) \
+ template <> \
+ struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, std::is_signed<I>::value> \
+ { \
+ using type = I; \
+ }
+
+INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
+#undef INTEGER_FOR_DIGITS_AND_SIGN
+
+// WARNING: We have no IntegerForDigitsAndSign<128, *>. If we ever add one to
+// support 128-bit math, then the promotion templates below will need to be
+// updated (or more likely replaced with a decltype expression).
+static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
+ "Max integer size not supported for this toolchain.");
+
+template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
+struct TwiceWiderInteger
+{
+ using type =
+ typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2, IsSigned>::type;
+};
+
+enum ArithmeticPromotionCategory
+{
+ LEFT_PROMOTION, // Use the type of the left-hand argument.
+ RIGHT_PROMOTION // Use the type of the right-hand argument.
+};
+
+// Determines the type that can represent the largest positive value.
+template <typename Lhs,
+ typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value) ? LEFT_PROMOTION
+ : RIGHT_PROMOTION>
+struct MaxExponentPromotion;
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION>
+{
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION>
+{
+ using type = Rhs;
+};
+
+// Determines the type that can represent the lowest arithmetic value.
+template <typename Lhs,
+ typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ std::is_signed<Lhs>::value
+ ? (std::is_signed<Rhs>::value
+ ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value ? LEFT_PROMOTION
+ : RIGHT_PROMOTION)
+ : LEFT_PROMOTION)
+ : (std::is_signed<Rhs>::value
+ ? RIGHT_PROMOTION
+ : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value ? LEFT_PROMOTION
+ : RIGHT_PROMOTION))>
+struct LowestValuePromotion;
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION>
+{
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION>
+{
+ using type = Rhs;
+};
+
+// Determines the type that is best able to represent an arithmetic result.
+template <typename Lhs,
+ typename Rhs = Lhs,
+ bool is_intmax_type =
+              std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value &&
+              IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::value ==
+                  IntegerBitsPlusSign<intmax_t>::value,
+          bool is_max_exponent =
+              StaticDstRangeRelationToSrcRange<typename MaxExponentPromotion<Lhs, Rhs>::type,
+                                               Lhs>::value == NUMERIC_RANGE_CONTAINED &&
+              StaticDstRangeRelationToSrcRange<typename MaxExponentPromotion<Lhs, Rhs>::type,
+                                               Rhs>::value == NUMERIC_RANGE_CONTAINED>
+struct BigEnoughPromotion;
+
+// The side with the max exponent is big enough.
+template <typename Lhs, typename Rhs, bool is_intmax_type>
+struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true>
+{
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = true;
+};
+
+// We can use a twice wider type to fit.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, false, false>
+{
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value || std::is_signed<Rhs>::value>::type;
+ static const bool is_contained = true;
+};
+
+// No type is large enough.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, true, false>
+{
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. So, for an integer we
+// care if the destination type preserves the sign and is twice the width of
+// the source.
+template <typename T, typename Lhs, typename Rhs = Lhs>
+struct IsIntegerArithmeticSafe
+{
+ static const bool value =
+ !std::is_floating_point<T>::value && !std::is_floating_point<Lhs>::value &&
+ !std::is_floating_point<Rhs>::value &&
+ std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
+ std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
+};
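+
+// Illustrative sketch (not part of the upstream header): a 64-bit signed type
+// is wide enough for any sum or product of two 16-bit values, while a type of
+// the same width as its operands is not, e.g.:
+//   static_assert(IsIntegerArithmeticSafe<int64_t, int16_t, int16_t>::value, "");
+//   static_assert(!IsIntegerArithmeticSafe<int32_t, int32_t, int32_t>::value, "");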
+
+// Promotes to a type that can represent any possible result of a binary
+// arithmetic operation with the source types.
+template <typename Lhs,
+ typename Rhs,
+ bool is_promotion_possible = IsIntegerArithmeticSafe<
+ typename std::conditional<std::is_signed<Lhs>::value || std::is_signed<Rhs>::value,
+ intmax_t,
+ uintmax_t>::type,
+ typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
+struct FastIntegerArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, true>
+{
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value || std::is_signed<Rhs>::value>::type;
+ static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
+ static const bool is_contained = true;
+};
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, false>
+{
+ using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
+
+// Extracts the underlying type from an enum.
+template <typename T, bool is_enum = std::is_enum<T>::value>
+struct ArithmeticOrUnderlyingEnum;
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, true>
+{
+ using type = typename std::underlying_type<T>::type;
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, false>
+{
+ using type = T;
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+// The following are helper templates used in the CheckedNumeric class.
+template <typename T>
+class CheckedNumeric;
+
+template <typename T>
+class ClampedNumeric;
+
+template <typename T>
+class StrictNumeric;
+
+// Used to treat CheckedNumeric and arithmetic underlying types the same.
+template <typename T>
+struct UnderlyingType
+{
+ using type = typename ArithmeticOrUnderlyingEnum<T>::type;
+ static const bool is_numeric = std::is_arithmetic<type>::value;
+ static const bool is_checked = false;
+ static const bool is_clamped = false;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<CheckedNumeric<T>>
+{
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = true;
+ static const bool is_clamped = false;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<ClampedNumeric<T>>
+{
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = false;
+ static const bool is_clamped = true;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<StrictNumeric<T>>
+{
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = false;
+ static const bool is_clamped = false;
+ static const bool is_strict = true;
+};
+
+template <typename L, typename R>
+struct IsCheckedOp
+{
+ static const bool value = UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsClampedOp
+{
+ static const bool value = UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped) &&
+ !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsStrictOp
+{
+ static const bool value = UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict) &&
+ !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked) &&
+ !(UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped);
+};
+
+// as_signed<> returns the supplied integral value (or integral castable
+// Numeric template) cast as a signed integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_signed<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_signed<typename base::internal::UnderlyingType<Src>::type>::type
+as_signed(const Src value)
+{
+ static_assert(std::is_integral<decltype(as_signed(value))>::value,
+ "Argument must be a signed or unsigned integer type.");
+ return static_cast<decltype(as_signed(value))>(value);
+}
+
+// as_unsigned<> returns the supplied integral value (or integral castable
+// Numeric template) cast as an unsigned integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_unsigned<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_unsigned<typename base::internal::UnderlyingType<Src>::type>::type
+as_unsigned(const Src value)
+{
+ static_assert(std::is_integral<decltype(as_unsigned(value))>::value,
+ "Argument must be a signed or unsigned integer type.");
+ return static_cast<decltype(as_unsigned(value))>(value);
+}
+
+template <typename L, typename R>
+constexpr bool IsLessImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range)
+{
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) < static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLess
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsLessOrEqualImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range)
+{
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) <= static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLessOrEqual
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range)
+{
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) > static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreater
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterOrEqualImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range)
+{
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) >= static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreaterOrEqual
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+struct IsEqual
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return DstRangeRelationToSrcRange<R>(lhs) == DstRangeRelationToSrcRange<L>(rhs) &&
+ static_cast<decltype(lhs + rhs)>(lhs) == static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+template <typename L, typename R>
+struct IsNotEqual
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return DstRangeRelationToSrcRange<R>(lhs) != DstRangeRelationToSrcRange<L>(rhs) ||
+ static_cast<decltype(lhs + rhs)>(lhs) != static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+// These perform the actual math operations on the CheckedNumerics.
+// Binary arithmetic operations.
+template <template <typename, typename> class C, typename L, typename R>
+constexpr bool SafeCompare(const L lhs, const R rhs)
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ using Promotion = BigEnoughPromotion<L, R>;
+ using BigType = typename Promotion::type;
+ return Promotion::is_contained
+ // Force to a larger type for speed if both are contained.
+ ? C<BigType, BigType>::Test(static_cast<BigType>(static_cast<L>(lhs)),
+ static_cast<BigType>(static_cast<R>(rhs)))
+ // Let the template functions figure it out for mixed types.
+ : C<L, R>::Test(lhs, rhs);
+}
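+
+// Illustrative note (not in the original source): plain C++ comparison of
+// mixed signs can surprise, e.g. (-1 < 1u) is false after the usual integral
+// conversions, whereas SafeCompare<IsLess>(-1, 1u) promotes both operands to
+// a big-enough signed type (here a 64-bit integer) and correctly yields true.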
+
+template <typename Dst, typename Src>
+constexpr bool IsMaxInRangeForNumericType()
+{
+ return IsGreaterOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::max(),
+ std::numeric_limits<Src>::max());
+}
+
+template <typename Dst, typename Src>
+constexpr bool IsMinInRangeForNumericType()
+{
+ return IsLessOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::lowest(),
+ std::numeric_limits<Src>::lowest());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMax()
+{
+ return !IsMaxInRangeForNumericType<Dst, Src>() ? Dst(std::numeric_limits<Dst>::max())
+ : Dst(std::numeric_limits<Src>::max());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMin()
+{
+ return !IsMinInRangeForNumericType<Dst, Src>() ? Dst(std::numeric_limits<Dst>::lowest())
+ : Dst(std::numeric_limits<Src>::lowest());
+}
+
+// This is a wrapper that returns the max or min for a supplied type. If the
+// argument is false, the returned value is the maximum; if true, it is the
+// minimum.
+template <typename Dst, typename Src = Dst>
+constexpr Dst CommonMaxOrMin(bool is_min)
+{
+ return is_min ? CommonMin<Dst, Src>() : CommonMax<Dst, Src>();
+}
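+
+// Illustrative (not in the original source): CommonMax<int32_t, int64_t>()
+// and CommonMax<int64_t, int32_t>() both evaluate to INT32_MAX, i.e. the
+// largest value representable in the narrower of the two types.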
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math.h
new file mode 100644
index 0000000000..a708cfddc2
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math.h
@@ -0,0 +1,12 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_H_
+#define BASE_NUMERICS_SAFE_MATH_H_
+
+#include "anglebase/numerics/checked_math.h"
+#include "anglebase/numerics/clamped_math.h"
+#include "anglebase/numerics/safe_conversions.h"
+
+#endif // BASE_NUMERICS_SAFE_MATH_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_arm_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_arm_impl.h
new file mode 100644
index 0000000000..3efdf2596d
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_arm_impl.h
@@ -0,0 +1,131 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
+
+#include <cassert>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+template <typename T, typename U>
+struct CheckedMulFastAsmOp
+{
+ static const bool is_supported = FastIntegerArithmeticPromotion<T, U>::is_contained;
+
+ // The following is much more efficient than the Clang and GCC builtins for
+ // performing overflow-checked multiplication when a twice wider type is
+ // available. The code below compiles down to 2-3 instructions, depending on the
+ // width of the types in use.
+ // As an example, an int32_t multiply compiles to:
+ // smull r0, r1, r0, r1
+ // cmp r1, r1, asr #31
+ // And an int16_t multiply compiles to:
+ // smulbb r1, r1, r0
+ // asr r2, r1, #16
+ // cmp r2, r1, asr #15
+ template <typename V>
+ __attribute__((always_inline)) static bool Do(T x, U y, V *result)
+ {
+ using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ Promotion presult;
+
+ presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
+ if (!IsValueInRangeForNumericType<V>(presult))
+ return false;
+ *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastAsmOp
+{
+ static const bool is_supported =
+ BigEnoughPromotion<T, U>::is_contained &&
+ IsTypeInRangeForNumericType<int32_t, typename BigEnoughPromotion<T, U>::type>::value;
+
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ // This will get promoted to an int, so let the compiler do whatever is
+ // clever and rely on the saturated cast to bounds check.
+ if (IsIntegerArithmeticSafe<int, T, U>::value)
+ return saturated_cast<V>(x + y);
+
+ int32_t result;
+ int32_t x_i32 = checked_cast<int32_t>(x);
+ int32_t y_i32 = checked_cast<int32_t>(y);
+
+ asm("qadd %[result], %[first], %[second]"
+ : [result] "=r"(result)
+ : [first] "r"(x_i32), [second] "r"(y_i32));
+ return saturated_cast<V>(result);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastAsmOp
+{
+ static const bool is_supported =
+ BigEnoughPromotion<T, U>::is_contained &&
+ IsTypeInRangeForNumericType<int32_t, typename BigEnoughPromotion<T, U>::type>::value;
+
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ // This will get promoted to an int, so let the compiler do whatever is
+ // clever and rely on the saturated cast to bounds check.
+ if (IsIntegerArithmeticSafe<int, T, U>::value)
+ return saturated_cast<V>(x - y);
+
+ int32_t result;
+ int32_t x_i32 = checked_cast<int32_t>(x);
+ int32_t y_i32 = checked_cast<int32_t>(y);
+
+ asm("qsub %[result], %[first], %[second]"
+ : [result] "=r"(result)
+ : [first] "r"(x_i32), [second] "r"(y_i32));
+ return saturated_cast<V>(result);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastAsmOp
+{
+ static const bool is_supported = CheckedMulFastAsmOp<T, U>::is_supported;
+
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ // Use the CheckedMulFastAsmOp for full-width 32-bit values, because
+ // it's fewer instructions than promoting and then saturating.
+ if (!IsIntegerArithmeticSafe<int32_t, T, U>::value &&
+ !IsIntegerArithmeticSafe<uint32_t, T, U>::value)
+ {
+ V result;
+ return CheckedMulFastAsmOp<T, U>::Do(x, y, &result)
+ ? result
+ : CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
+ }
+
+ assert((FastIntegerArithmeticPromotion<T, U>::is_contained));
+ using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ return saturated_cast<V>(static_cast<Promotion>(x) * static_cast<Promotion>(y));
+ }
+};
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_clang_gcc_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_clang_gcc_impl.h
new file mode 100644
index 0000000000..0f6a1e14d6
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_clang_gcc_impl.h
@@ -0,0 +1,182 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions.h"
+
+#if !defined(__native_client__) && (defined(__ARMEL__) || defined(__arch64__))
+# include "anglebase/numerics/safe_math_arm_impl.h"
+# define BASE_HAS_ASSEMBLER_SAFE_MATH (1)
+#else
+# define BASE_HAS_ASSEMBLER_SAFE_MATH (0)
+#endif
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+// These are the non-functioning boilerplate implementations of the optimized
+// safe math routines.
+#if !BASE_HAS_ASSEMBLER_SAFE_MATH
+template <typename T, typename U>
+struct CheckedMulFastAsmOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V *)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastAsmOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastAsmOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastAsmOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+#endif // BASE_HAS_ASSEMBLER_SAFE_MATH
+#undef BASE_HAS_ASSEMBLER_SAFE_MATH
+
+template <typename T, typename U>
+struct CheckedAddFastOp
+{
+ static const bool is_supported = true;
+ template <typename V>
+ __attribute__((always_inline)) static constexpr bool Do(T x, U y, V *result)
+ {
+ return !__builtin_add_overflow(x, y, result);
+ }
+};
+
+template <typename T, typename U>
+struct CheckedSubFastOp
+{
+ static const bool is_supported = true;
+ template <typename V>
+ __attribute__((always_inline)) static constexpr bool Do(T x, U y, V *result)
+ {
+ return !__builtin_sub_overflow(x, y, result);
+ }
+};
+
+template <typename T, typename U>
+struct CheckedMulFastOp
+{
+#if defined(__clang__)
+ // TODO(jschuh): Get the Clang runtime library issues sorted out so we can
+ // support full-width, mixed-sign multiply builtins.
+ // https://crbug.com/613003
+ // We can support intptr_t, uintptr_t, or a smaller common type.
+ static const bool is_supported = (IsTypeInRangeForNumericType<intptr_t, T>::value &&
+ IsTypeInRangeForNumericType<intptr_t, U>::value) ||
+ (IsTypeInRangeForNumericType<uintptr_t, T>::value &&
+ IsTypeInRangeForNumericType<uintptr_t, U>::value);
+#else
+ static const bool is_supported = true;
+#endif
+ template <typename V>
+ __attribute__((always_inline)) static constexpr bool Do(T x, U y, V *result)
+ {
+ return CheckedMulFastAsmOp<T, U>::is_supported ? CheckedMulFastAsmOp<T, U>::Do(x, y, result)
+ : !__builtin_mul_overflow(x, y, result);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastOp
+{
+ static const bool is_supported = ClampedAddFastAsmOp<T, U>::is_supported;
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ return ClampedAddFastAsmOp<T, U>::template Do<V>(x, y);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastOp
+{
+ static const bool is_supported = ClampedSubFastAsmOp<T, U>::is_supported;
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ return ClampedSubFastAsmOp<T, U>::template Do<V>(x, y);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastOp
+{
+ static const bool is_supported = ClampedMulFastAsmOp<T, U>::is_supported;
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ return ClampedMulFastAsmOp<T, U>::template Do<V>(x, y);
+ }
+};
+
+template <typename T>
+struct ClampedNegFastOp
+{
+ static const bool is_supported = std::is_signed<T>::value;
+ __attribute__((always_inline)) static T Do(T value)
+ {
+ // Use this when there is no assembler path available.
+ if (!ClampedSubFastAsmOp<T, T>::is_supported)
+ {
+ T result;
+ return !__builtin_sub_overflow(T(0), value, &result) ? result
+ : std::numeric_limits<T>::max();
+ }
+
+ // Fall back to the normal subtraction path.
+ return ClampedSubFastOp<T, T>::template Do<T>(T(0), value);
+ }
+};
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_shared_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_shared_impl.h
new file mode 100644
index 0000000000..6f46f12e5f
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_shared_impl.h
@@ -0,0 +1,227 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <cassert>
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions.h"
+
+#if defined(OS_ASMJS)
+// Optimized safe math instructions are incompatible with asmjs.
+# define BASE_HAS_OPTIMIZED_SAFE_MATH (0)
+// Where available use builtin math overflow support on Clang and GCC.
+#elif !defined(__native_client__) && \
+ ((defined(__clang__) && \
+ ((__clang_major__ > 3) || (__clang_major__ == 3 && __clang_minor__ >= 4))) || \
+ (defined(__GNUC__) && __GNUC__ >= 5))
+# include "anglebase/numerics/safe_math_clang_gcc_impl.h"
+# define BASE_HAS_OPTIMIZED_SAFE_MATH (1)
+#else
+# define BASE_HAS_OPTIMIZED_SAFE_MATH (0)
+#endif
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+// These are the non-functioning boilerplate implementations of the optimized
+// safe math routines.
+#if !BASE_HAS_OPTIMIZED_SAFE_MATH
+template <typename T, typename U>
+struct CheckedAddFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V *)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct CheckedSubFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V *)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct CheckedMulFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V *)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T>
+struct ClampedNegFastOp
+{
+ static const bool is_supported = false;
+ static constexpr T Do(T)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<T>();
+ }
+};
+#endif // BASE_HAS_OPTIMIZED_SAFE_MATH
+#undef BASE_HAS_OPTIMIZED_SAFE_MATH
+
+// This is used for UnsignedAbs, where we need to support floating-point
+// template instantiations even though we don't actually support the operations.
+// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
+// so the float versions will not compile.
+template <typename Numeric,
+ bool IsInteger = std::is_integral<Numeric>::value,
+ bool IsFloat = std::is_floating_point<Numeric>::value>
+struct UnsignedOrFloatForSize;
+
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, true, false>
+{
+ using type = typename std::make_unsigned<Numeric>::type;
+};
+
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, false, true>
+{
+ using type = Numeric;
+};
+
+// Wrap the unary operations to allow SFINAE when instantiating integrals versus
+// floating points. These don't perform any overflow checking. Rather, they
+// exhibit well-defined overflow semantics and rely on the caller to detect
+// if an overflow occurred.
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value>::type * = nullptr>
+constexpr T NegateWrapper(T value)
+{
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ // This will compile to a NEG on Intel, and is normal negation on ARM.
+ return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
+}
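+
+// Illustrative (not in the original source): on a two's-complement target
+// with 32-bit int, NegateWrapper(INT_MIN) wraps to INT_MIN via the unsigned
+// subtraction above instead of invoking signed-overflow UB; detecting that
+// wrap is the caller's job.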
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value>::type * = nullptr>
+constexpr T NegateWrapper(T value)
+{
+ return -value;
+}
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value>::type * = nullptr>
+constexpr typename std::make_unsigned<T>::type InvertWrapper(T value)
+{
+ return ~value;
+}
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value>::type * = nullptr>
+constexpr T AbsWrapper(T value)
+{
+ return static_cast<T>(SafeUnsignedAbs(value));
+}
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value>::type * = nullptr>
+constexpr T AbsWrapper(T value)
+{
+ return value < 0 ? -value : value;
+}
+
+template <template <typename, typename, typename> class M, typename L, typename R>
+struct MathWrapper
+{
+ using math = M<typename UnderlyingType<L>::type, typename UnderlyingType<R>::type, void>;
+ using type = typename math::result_type;
+};
+
+// The following macros are just boilerplate for the standard arithmetic
+// operator overloads and variadic function templates. A macro isn't the nicest
+// solution, but it beats rewriting these over and over again.
+#define BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME) \
+ template <typename L, typename R, typename... Args> \
+ constexpr auto CL_ABBR##OP_NAME(const L lhs, const R rhs, const Args... args) \
+ { \
+ return CL_ABBR##MathOp<CLASS##OP_NAME##Op, L, R, Args...>(lhs, rhs, args...); \
+ }
+
+#define BASE_NUMERIC_ARITHMETIC_OPERATORS(CLASS, CL_ABBR, OP_NAME, OP, CMP_OP) \
+ /* Binary arithmetic operator for all CLASS##Numeric operations. */ \
+ template <typename L, typename R, \
+ typename std::enable_if<Is##CLASS##Op<L, R>::value>::type * = nullptr> \
+ constexpr CLASS##Numeric<typename MathWrapper<CLASS##OP_NAME##Op, L, R>::type> operator OP( \
+ const L lhs, const R rhs) \
+ { \
+ return decltype(lhs OP rhs)::template MathOp<CLASS##OP_NAME##Op>(lhs, rhs); \
+ } \
+ /* Assignment arithmetic operator implementation from CLASS##Numeric. */ \
+ template <typename L> \
+ template <typename R> \
+ constexpr CLASS##Numeric<L> &CLASS##Numeric<L>::operator CMP_OP(const R rhs) \
+ { \
+ return MathOp<CLASS##OP_NAME##Op>(rhs); \
+ } \
+ /* Variadic arithmetic functions that return CLASS##Numeric. */ \
+ BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)
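+
+// Illustrative (an assumption about the call sites, which are not shown in
+// this file): the checked/clamped headers are expected to instantiate this
+// as, e.g.,
+//   BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Add, +, +=)
+// emitting operator+, operator+= and the variadic CheckAdd() in one shot.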
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.cc b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.cc
new file mode 100644
index 0000000000..cb88ba06e1
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.cc
@@ -0,0 +1,245 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "anglebase/sha1.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "anglebase/sys_byteorder.h"
+
+namespace angle
+{
+
+namespace base
+{
+
+// Implementation of SHA-1. Only handles data in byte-sized blocks,
+// which simplifies the code a fair bit.
+
+// Identifier names follow notation in FIPS PUB 180-3, where you'll
+// also find a description of the algorithm:
+// http://csrc.nist.gov/publications/fips/fips180-3/fips180-3_final.pdf
+
+// Usage example:
+//
+// SecureHashAlgorithm sha;
+// while(there is data to hash)
+// sha.Update(moredata, size of data);
+// sha.Final();
+// memcpy(somewhere, sha.Digest(), 20);
+//
+// To reuse the sha instance, call sha.Init().
+
+// TODO(jhawkins): Replace this implementation with a per-platform
+// implementation using each platform's crypto library. See
+// http://crbug.com/47218
+
+class SecureHashAlgorithm
+{
+ public:
+ SecureHashAlgorithm() { Init(); }
+
+ static const int kDigestSizeBytes;
+
+ void Init();
+ void Update(const void *data, size_t nbytes);
+ void Final();
+
+ // 20 bytes of message digest.
+ const unsigned char *Digest() const { return reinterpret_cast<const unsigned char *>(H); }
+
+ private:
+ void Pad();
+ void Process();
+
+ uint32_t A, B, C, D, E;
+
+ uint32_t H[5];
+
+ union {
+ uint32_t W[80];
+ uint8_t M[64];
+ };
+
+ uint32_t cursor;
+ uint64_t l;
+};
+
+static inline uint32_t f(uint32_t t, uint32_t B, uint32_t C, uint32_t D)
+{
+ if (t < 20)
+ {
+ return (B & C) | ((~B) & D);
+ }
+ else if (t < 40)
+ {
+ return B ^ C ^ D;
+ }
+ else if (t < 60)
+ {
+ return (B & C) | (B & D) | (C & D);
+ }
+ else
+ {
+ return B ^ C ^ D;
+ }
+}
+
+static inline uint32_t S(uint32_t n, uint32_t X)
+{
+ return (X << n) | (X >> (32 - n));
+}
+
+static inline uint32_t K(uint32_t t)
+{
+ if (t < 20)
+ {
+ return 0x5a827999;
+ }
+ else if (t < 40)
+ {
+ return 0x6ed9eba1;
+ }
+ else if (t < 60)
+ {
+ return 0x8f1bbcdc;
+ }
+ else
+ {
+ return 0xca62c1d6;
+ }
+}
+
+const int SecureHashAlgorithm::kDigestSizeBytes = 20;
+
+void SecureHashAlgorithm::Init()
+{
+ A = 0;
+ B = 0;
+ C = 0;
+ D = 0;
+ E = 0;
+ cursor = 0;
+ l = 0;
+ H[0] = 0x67452301;
+ H[1] = 0xefcdab89;
+ H[2] = 0x98badcfe;
+ H[3] = 0x10325476;
+ H[4] = 0xc3d2e1f0;
+}
+
+void SecureHashAlgorithm::Final()
+{
+ Pad();
+ Process();
+
+ for (int t = 0; t < 5; ++t)
+ H[t] = ByteSwap(H[t]);
+}
+
+void SecureHashAlgorithm::Update(const void *data, size_t nbytes)
+{
+ const uint8_t *d = reinterpret_cast<const uint8_t *>(data);
+ while (nbytes--)
+ {
+ M[cursor++] = *d++;
+ if (cursor >= 64)
+ Process();
+ l += 8;
+ }
+}
+
+void SecureHashAlgorithm::Pad()
+{
+ M[cursor++] = 0x80;
+
+ if (cursor > 64 - 8)
+ {
+ // pad out to next block
+ while (cursor < 64)
+ M[cursor++] = 0;
+
+ Process();
+ }
+
+ while (cursor < 64 - 8)
+ M[cursor++] = 0;
+
+ M[cursor++] = (l >> 56) & 0xff;
+ M[cursor++] = (l >> 48) & 0xff;
+ M[cursor++] = (l >> 40) & 0xff;
+ M[cursor++] = (l >> 32) & 0xff;
+ M[cursor++] = (l >> 24) & 0xff;
+ M[cursor++] = (l >> 16) & 0xff;
+ M[cursor++] = (l >> 8) & 0xff;
+ M[cursor++] = l & 0xff;
+}
+
+void SecureHashAlgorithm::Process()
+{
+ uint32_t t;
+
+ // Each a...e corresponds to a section in the FIPS 180-3 algorithm.
+
+ // a.
+ //
+ // W and M are in a union, so no need to memcpy.
+ // memcpy(W, M, sizeof(M));
+ for (t = 0; t < 16; ++t)
+ W[t] = ByteSwap(W[t]);
+
+ // b.
+ for (t = 16; t < 80; ++t)
+ W[t] = S(1, W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16]);
+
+ // c.
+ A = H[0];
+ B = H[1];
+ C = H[2];
+ D = H[3];
+ E = H[4];
+
+ // d.
+ for (t = 0; t < 80; ++t)
+ {
+ uint32_t TEMP = S(5, A) + f(t, B, C, D) + E + W[t] + K(t);
+ E = D;
+ D = C;
+ C = S(30, B);
+ B = A;
+ A = TEMP;
+ }
+
+ // e.
+ H[0] += A;
+ H[1] += B;
+ H[2] += C;
+ H[3] += D;
+ H[4] += E;
+
+ cursor = 0;
+}
+
+std::string SHA1HashString(const std::string &str)
+{
+ char hash[SecureHashAlgorithm::kDigestSizeBytes];
+ SHA1HashBytes(reinterpret_cast<const unsigned char *>(str.c_str()), str.length(),
+ reinterpret_cast<unsigned char *>(hash));
+ return std::string(hash, SecureHashAlgorithm::kDigestSizeBytes);
+}
+
+void SHA1HashBytes(const unsigned char *data, size_t len, unsigned char *hash)
+{
+ SecureHashAlgorithm sha;
+ sha.Update(data, len);
+ sha.Final();
+
+ memcpy(hash, sha.Digest(), SecureHashAlgorithm::kDigestSizeBytes);
+}
+
+} // namespace base
+
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.h
new file mode 100644
index 0000000000..a60908814f
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANGLEBASE_SHA1_H_
+#define ANGLEBASE_SHA1_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "anglebase/base_export.h"
+
+namespace angle
+{
+
+namespace base
+{
+
+// These functions perform SHA-1 operations.
+
+static const size_t kSHA1Length = 20; // Length in bytes of a SHA-1 hash.
+
+// Computes the SHA-1 hash of the input string |str| and returns the full
+// hash.
+ANGLEBASE_EXPORT std::string SHA1HashString(const std::string &str);
+
+// Computes the SHA-1 hash of the |len| bytes in |data| and puts the hash
+// in |hash|. |hash| must be kSHA1Length bytes long.
+ANGLEBASE_EXPORT void SHA1HashBytes(const unsigned char *data, size_t len, unsigned char *hash);
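+
+// Illustrative usage sketch (not part of the upstream header); |input| and
+// |input_len| are assumed to describe the caller's buffer:
+//
+//   unsigned char digest[angle::base::kSHA1Length];
+//   angle::base::SHA1HashBytes(input, input_len, digest);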
+
+} // namespace base
+
+} // namespace angle
+
+#endif // ANGLEBASE_SHA1_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/sys_byteorder.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/sys_byteorder.h
new file mode 100644
index 0000000000..70d9c275e6
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/sys_byteorder.h
@@ -0,0 +1,49 @@
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// sys_byteorder.h: Compatibility hacks for importing Chromium's base/SHA1.
+
+#ifndef ANGLEBASE_SYS_BYTEORDER_H_
+#define ANGLEBASE_SYS_BYTEORDER_H_
+
+#include <stdint.h>
+
+namespace angle
+{
+
+namespace base
+{
+
+// Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
+inline uint16_t ByteSwap(uint16_t x)
+{
+#if defined(_MSC_VER)
+ return _byteswap_ushort(x);
+#else
+ return __builtin_bswap16(x);
+#endif
+}
+
+inline uint32_t ByteSwap(uint32_t x)
+{
+#if defined(_MSC_VER)
+ return _byteswap_ulong(x);
+#else
+ return __builtin_bswap32(x);
+#endif
+}
+
+inline uint64_t ByteSwap(uint64_t x)
+{
+#if defined(_MSC_VER)
+ return _byteswap_uint64(x);
+#else
+ return __builtin_bswap64(x);
+#endif
+}
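+
+// Illustrative (not in the original source):
+// ByteSwap(uint32_t{0x12345678}) == 0x78563412.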
+
+} // namespace base
+
+} // namespace angle
+
+#endif // ANGLEBASE_SYS_BYTEORDER_H_
diff --git a/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.cpp b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.cpp
new file mode 100644
index 0000000000..379e5ce3d5
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.cpp
@@ -0,0 +1,339 @@
+/*-----------------------------------------------------------------------------
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain.
+ *
+ * This implementation was written by Shane Day, and is also public domain.
+ *
+ * This is a portable ANSI C implementation of MurmurHash3_x86_32 (Murmur3A)
+ * with support for progressive processing.
+ */
+
+/*-----------------------------------------------------------------------------
+
+If you want to understand the MurmurHash algorithm you would be much better
+off reading the original source. Just point your browser at:
+http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
+
+
+What does this version provide?
+
+1. Progressive data feeding. Useful when the entire payload to be hashed
+does not fit in memory or when the data is streamed through the application.
+Also useful when hashing a number of strings with a common prefix. A partial
+hash of a prefix string can be generated and reused for each suffix string.
+
+2. Portability. Plain old C so that it should compile on any old compiler.
+It is neutral to both CPU endianness and access alignment, while still
+avoiding inefficient code where CPU capabilities allow.
+
+3. Drop in. I personally like nice self-contained public domain code, making it
+easy to pilfer without loads of refactoring to work properly in the existing
+application code & makefile structure and mucking around with licence files.
+Just copy PMurHash.h and PMurHash.c and you're ready to go.
+
+
+How does it work?
+
+We can only process entire 32 bit chunks of input, except for the very end
+that may be shorter. So along with the partial hash we need to give back to
+the caller a carry containing up to 3 bytes that we were unable to process.
+This carry also needs to record the number of bytes the carry holds. I use
+the low 2 bits as a count (0..3) and the carry bytes are shifted into the
+high byte in stream order.
+
+To handle endianness I simply use a macro that reads a uint32_t and define
+that macro to be a direct read on little-endian machines, a read and swap
+on big-endian machines, or a byte-by-byte read if the endianness is unknown.
+
+-----------------------------------------------------------------------------*/
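+
+/* Illustrative usage sketch (not part of the original source): hash a common
+ * prefix once, then reuse the saved partial state for each suffix:
+ *
+ *   MH_UINT32 h1 = seed, carry = 0;
+ *   PMurHash32_Process(&h1, &carry, prefix, prefix_len);
+ *   MH_UINT32 h2 = h1, c2 = carry;   // snapshot the running state
+ *   PMurHash32_Process(&h2, &c2, suffix, suffix_len);
+ *   MH_UINT32 hash = PMurHash32_Result(h2, c2, prefix_len + suffix_len);
+ */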
+
+#include "PMurHash.h"
+#include <stdint.h>
+
+/* I used ugly type names in the header to avoid potential conflicts with
+ * application or system typedefs & defines. Since I'm not including any more
+ * headers below here I can rename these so that the code reads like C99 */
+#undef uint32_t
+#define uint32_t MH_UINT32
+#undef uint8_t
+#define uint8_t MH_UINT8
+
+/* MSVC warnings we choose to ignore */
+#if defined(_MSC_VER)
+# pragma warning(disable : 4127) /* conditional expression is constant */
+#endif
+
+/*-----------------------------------------------------------------------------
+ * Endianness, misalignment capabilities and util macros
+ *
+ * The following 3 macros are defined in this section. The other macros defined
+ * are only needed to help derive these 3.
+ *
+ * READ_UINT32(x) Read a little endian unsigned 32-bit int
+ * UNALIGNED_SAFE Defined if READ_UINT32 works on non-word boundaries
+ * ROTL32(x,r) Rotate x left by r bits
+ */
+
+/* Convention is to define __BYTE_ORDER == to one of these values */
+#if !defined(__BIG_ENDIAN)
+# define __BIG_ENDIAN 4321
+#endif
+#if !defined(__LITTLE_ENDIAN)
+# define __LITTLE_ENDIAN 1234
+#endif
+
+/* I386 */
+#if defined(_M_IX86) || defined(__i386__) || defined(__i386) || defined(i386)
+# define __BYTE_ORDER __LITTLE_ENDIAN
+# define UNALIGNED_SAFE
+#endif
+
+/* gcc 'may' define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ to 1 (Note the trailing __),
+ * or even _LITTLE_ENDIAN or _BIG_ENDIAN (Note the single _ prefix) */
+#if !defined(__BYTE_ORDER)
+# if defined(__LITTLE_ENDIAN__) && __LITTLE_ENDIAN__ == 1 || \
+ defined(_LITTLE_ENDIAN) && _LITTLE_ENDIAN == 1
+# define __BYTE_ORDER __LITTLE_ENDIAN
+# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__ == 1 || defined(_BIG_ENDIAN) && _BIG_ENDIAN == 1
+# define __BYTE_ORDER __BIG_ENDIAN
+# endif
+#endif
+
+/* gcc (usually) defines xEL/EB macros for ARM and MIPS endianness */
+#if !defined(__BYTE_ORDER)
+# if defined(__ARMEL__) || defined(__MIPSEL__)
+# define __BYTE_ORDER __LITTLE_ENDIAN
+# endif
+# if defined(__ARMEB__) || defined(__MIPSEB__)
+# define __BYTE_ORDER __BIG_ENDIAN
+# endif
+#endif
+
+/* Now find best way we can to READ_UINT32 */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+/* CPU endian matches murmurhash algorithm, so read 32-bit word directly */
+# define READ_UINT32(ptr) (*((uint32_t *)(ptr)))
+#elif __BYTE_ORDER == __BIG_ENDIAN
+/* TODO: Add additional cases below where a compiler provided bswap32 is available */
+# if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+# define READ_UINT32(ptr) (__builtin_bswap32(*((uint32_t *)(ptr))))
+# else
+/* Without a known fast bswap32 we're just as well off doing this */
+# define READ_UINT32(ptr) (ptr[0] | ptr[1] << 8 | ptr[2] << 16 | ptr[3] << 24)
+# define UNALIGNED_SAFE
+# endif
+#else
+/* Unknown endianness, so the last resort is to read individual bytes */
+# define READ_UINT32(ptr) (ptr[0] | ptr[1] << 8 | ptr[2] << 16 | ptr[3] << 24)
+
+/* Since we're not doing word-reads we can skip the messing about with realignment */
+# define UNALIGNED_SAFE
+#endif
+
+/* Find best way to ROTL32 */
+#if defined(_MSC_VER)
+# include <stdlib.h> /* Microsoft put _rotl declaration in here */
+# define ROTL32(x, r) _rotl(x, r)
+#else
+/* gcc recognises this code and generates a rotate instruction for CPUs with one */
+# define ROTL32(x, r) (((uint32_t)x << r) | ((uint32_t)x >> (32 - r)))
+#endif
+
+/*-----------------------------------------------------------------------------
+ * Core murmurhash algorithm macros */
+
+#define C1 (0xcc9e2d51)
+#define C2 (0x1b873593)
+
+/* This is the main processing body of the algorithm. It operates
+ * on each full 32-bits of input. */
+#define DOBLOCK(h1, k1) \
+ do \
+ { \
+ k1 *= C1; \
+ k1 = ROTL32(k1, 15); \
+ k1 *= C2; \
+ \
+ h1 ^= k1; \
+ h1 = ROTL32(h1, 13); \
+ h1 = h1 * 5 + 0xe6546b64; \
+ } while (0)
+
+/* Append unaligned bytes to carry, forcing hash churn if we have 4 bytes */
+/* cnt=bytes to process, h1=name of h1 var, c=carry, n=bytes in c, ptr/len=payload */
+#define DOBYTES(cnt, h1, c, n, ptr, len) \
+ do \
+ { \
+ int _i = cnt; \
+ while (_i--) \
+ { \
+ c = c >> 8 | *ptr++ << 24; \
+ n++; \
+ len--; \
+ if (n == 4) \
+ { \
+ DOBLOCK(h1, c); \
+ n = 0; \
+ } \
+ } \
+ } while (0)
+
+/*---------------------------------------------------------------------------*/
+
+namespace angle
+{
+/* Main hashing function. Initialise carry to 0 and h1 to 0 or an initial seed
+ * if wanted. Both ph1 and pcarry are required arguments. */
+void PMurHash32_Process(uint32_t *ph1, uint32_t *pcarry, const void *key, int len)
+{
+ uint32_t h1 = *ph1;
+ uint32_t c = *pcarry;
+
+ const uint8_t *ptr = (uint8_t *)key;
+ const uint8_t *end;
+
+ /* Extract carry count from low 2 bits of c value */
+ int n = c & 3;
+
+#if defined(UNALIGNED_SAFE)
+ /* This CPU handles unaligned word access */
+
+ /* Consume any carry bytes */
+ int i = (4 - n) & 3;
+ if (i && i <= len)
+ {
+ DOBYTES(i, h1, c, n, ptr, len);
+ }
+
+ /* Process 32-bit chunks */
+ end = ptr + len / 4 * 4;
+ for (; ptr < end; ptr += 4)
+ {
+ uint32_t k1 = READ_UINT32(ptr);
+ DOBLOCK(h1, k1);
+ }
+
+#else /*UNALIGNED_SAFE*/
+ /* This CPU does not handle unaligned word access */
+
+ /* Consume enough so that the next data byte is word aligned */
+ int i = -(intptr_t)ptr & 3;
+ if (i && i <= len)
+ {
+ DOBYTES(i, h1, c, n, ptr, len);
+ }
+
+ /* We're now aligned. Process in aligned blocks. Specialise for each possible carry count */
+ end = ptr + len / 4 * 4;
+ switch (n)
+ { /* how many bytes in c */
+ case 0: /* c=[----] w=[3210] b=[3210]=w c'=[----] */
+ for (; ptr < end; ptr += 4)
+ {
+ uint32_t k1 = READ_UINT32(ptr);
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 1: /* c=[0---] w=[4321] b=[3210]=c>>24|w<<8 c'=[4---] */
+ for (; ptr < end; ptr += 4)
+ {
+ uint32_t k1 = c >> 24;
+ c = READ_UINT32(ptr);
+ k1 |= c << 8;
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 2: /* c=[10--] w=[5432] b=[3210]=c>>16|w<<16 c'=[54--] */
+ for (; ptr < end; ptr += 4)
+ {
+ uint32_t k1 = c >> 16;
+ c = READ_UINT32(ptr);
+ k1 |= c << 16;
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 3: /* c=[210-] w=[6543] b=[3210]=c>>8|w<<24 c'=[654-] */
+ for (; ptr < end; ptr += 4)
+ {
+ uint32_t k1 = c >> 8;
+ c = READ_UINT32(ptr);
+ k1 |= c << 24;
+ DOBLOCK(h1, k1);
+ }
+ }
+#endif /*UNALIGNED_SAFE*/
+
+ /* Advance over whole 32-bit chunks, possibly leaving 1..3 bytes */
+ len -= len / 4 * 4;
+
+ /* Append any remaining bytes into carry */
+ DOBYTES(len, h1, c, n, ptr, len);
+
+ /* Copy out new running hash and carry */
+ *ph1 = h1;
+ *pcarry = (c & ~0xff) | n;
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Finalize a hash. To match the original Murmur3A the total_length must be provided */
+uint32_t PMurHash32_Result(uint32_t h, uint32_t carry, uint32_t total_length)
+{
+ uint32_t k1;
+ int n = carry & 3;
+ if (n)
+ {
+ k1 = carry >> (4 - n) * 8;
+ k1 *= C1;
+ k1 = ROTL32(k1, 15);
+ k1 *= C2;
+ h ^= k1;
+ }
+ h ^= total_length;
+
+ /* fmix */
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+
+ return h;
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Murmur3A compatible all-at-once */
+uint32_t PMurHash32(uint32_t seed, const void *key, int len)
+{
+ uint32_t h1 = seed, carry = 0;
+ PMurHash32_Process(&h1, &carry, key, len);
+ return PMurHash32_Result(h1, carry, len);
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Provide an API suitable for smhasher */
+void PMurHash32_test(const void *key, int len, uint32_t seed, void *out)
+{
+ uint32_t h1 = seed, carry = 0;
+ const uint8_t *ptr = (uint8_t *)key;
+ const uint8_t *end = ptr + len;
+
+#if 0 /* Exercise the progressive processing */
+ while(ptr < end) {
+ //const uint8_t *mid = ptr + rand()%(end-ptr)+1;
+ const uint8_t *mid = ptr + (rand()&0xF);
+ mid = mid<end?mid:end;
+ PMurHash32_Process(&h1, &carry, ptr, mid-ptr);
+ ptr = mid;
+ }
+#else
+ PMurHash32_Process(&h1, &carry, ptr, (int)(end - ptr));
+#endif
+ h1 = PMurHash32_Result(h1, carry, len);
+ *(uint32_t *)out = h1;
+}
+} // namespace angle
+
+/*---------------------------------------------------------------------------*/
diff --git a/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.h b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.h
new file mode 100644
index 0000000000..0a3c96fa14
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.h
@@ -0,0 +1,57 @@
+/*-----------------------------------------------------------------------------
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain.
+ *
+ * This implementation was written by Shane Day, and is also public domain.
+ *
+ * This is a portable ANSI C implementation of MurmurHash3_x86_32 (Murmur3A)
+ * with support for progressive processing.
+ */
+
+/* ------------------------------------------------------------------------- */
+/* Determine what native type to use for uint32_t */
+
+/* We can't use the name 'uint32_t' here because it will conflict with
+ * any version provided by the system headers or application. */
+
+/* First look for special cases */
+#if defined(_MSC_VER)
+# define MH_UINT32 unsigned long
+#endif
+
+/* If the compiler says it's C99 then take its word for it */
+#if !defined(MH_UINT32) && (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+# include <stdint.h>
+# define MH_UINT32 uint32_t
+#endif
+
+/* Otherwise try testing against max value macros from limit.h */
+#if !defined(MH_UINT32)
+# include <limits.h>
+# if (USHRT_MAX == 0xffffffffUL)
+# define MH_UINT32 unsigned short
+# elif (UINT_MAX == 0xffffffffUL)
+# define MH_UINT32 unsigned int
+# elif (ULONG_MAX == 0xffffffffUL)
+# define MH_UINT32 unsigned long
+# endif
+#endif
+
+#if !defined(MH_UINT32)
+# error Unable to determine type name for unsigned 32-bit int
+#endif
+
+/* I'm yet to work on a platform where 'unsigned char' is not 8 bits */
+#define MH_UINT8 unsigned char
+
+/* ------------------------------------------------------------------------- */
+/* Prototypes */
+
+namespace angle
+{
+void PMurHash32_Process(MH_UINT32 *ph1, MH_UINT32 *pcarry, const void *key, int len);
+MH_UINT32 PMurHash32_Result(MH_UINT32 h1, MH_UINT32 carry, MH_UINT32 total_length);
+MH_UINT32 PMurHash32(MH_UINT32 seed, const void *key, int len);
+
+void PMurHash32_test(const void *key, int len, MH_UINT32 seed, void *out);
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.c b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.c
new file mode 100644
index 0000000000..ae9a55116c
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.c
@@ -0,0 +1,1030 @@
+/*
+* xxHash - Fast Hash algorithm
+* Copyright (C) 2012-2016, Yann Collet
+*
+* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following disclaimer
+* in the documentation and/or other materials provided with the
+* distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+* You can contact the author at :
+* - xxHash homepage: http://www.xxhash.com
+* - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+
+/* *************************************
+* Tuning parameters
+***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on the compiler but violates the C standard.
+ * It can generate buggy code on targets which do not support unaligned memory accesses.
+ * But in some circumstances, it's the only known way to get the most performance (i.e., GCC + ARMv6).
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define XXH_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7S__) ))
+# define XXH_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
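+
+/* Illustrative (an assumption, not upstream text): to pick an access method
+ * explicitly, define the macro on the build command line, e.g.
+ * -DXXH_FORCE_MEMORY_ACCESS=1 to request the __packed read path. */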
+
+/*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If the input pointer is NULL, xxHash's default behavior is to dereference it, triggering a segfault.
+ * When this macro is enabled, xxHash actively checks the input for a null pointer.
+ * If it is NULL, the result is the same as for a zero-length input.
+ */
+#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
+# define XXH_ACCEPT_NULL_INPUT_POINTER 0
+#endif
+
+/*!XXH_FORCE_NATIVE_FORMAT :
+ * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
+ * Results are therefore identical on little-endian and big-endian CPUs.
+ * This comes at a performance cost on big-endian CPUs, since some swapping is required to emulate the little-endian format.
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
+ * to improve speed on big-endian CPUs.
+ * This option has no impact on little-endian CPUs.
+ */
+#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
+# define XXH_FORCE_NATIVE_FORMAT 0
+#endif
+
+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash;
+ * set it to 0 when the input is guaranteed to be aligned,
+ * or when alignment doesn't matter for performance.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+# define XXH_FORCE_ALIGN_CHECK 0
+# else
+# define XXH_FORCE_ALIGN_CHECK 1
+# endif
+#endif
+
+
+/* *************************************
+* Includes & Memory related functions
+***************************************/
+/*! Modify the local functions below should you wish to use some other memory routines
+* for malloc(), free() */
+#include <stdlib.h>
+static void* XXH_malloc(size_t s) { return malloc(s); }
+static void XXH_free (void* p) { free(p); }
+/*! and for memcpy() */
+#include <string.h>
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+
+#include <assert.h> /* assert */
+
+#define XXH_STATIC_LINKING_ONLY
+#include "xxhash.h"
+
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# define FORCE_INLINE static __forceinline
+#else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+# else
+# define FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif
+
+
+/* *************************************
+* Basic Types
+***************************************/
+#ifndef MEM_MODULE
+# if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+# else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+# endif
+#endif
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* __pack instructions are safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; } __attribute__((packed)) unalign;
+static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+static U32 XXH_read32(const void* memPtr)
+{
+ U32 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_MEMORY_ACCESS */
+
+
+/* ****************************************
+* Compiler-specific Functions and Macros
+******************************************/
+#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
+#if defined(_MSC_VER)
+# define XXH_rotl32(x,r) _rotl(x,r)
+# define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
+#endif
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap32 __builtin_bswap32
+#else
+static U32 XXH_swap32 (U32 x)
+{
+ return ((x << 24) & 0xff000000 ) |
+ ((x << 8) & 0x00ff0000 ) |
+ ((x >> 8) & 0x0000ff00 ) |
+ ((x >> 24) & 0x000000ff );
+}
+#endif
+
+
+/* *************************************
+* Architecture Macros
+***************************************/
+typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
+
+/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+static int XXH_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+# define XXH_CPU_LITTLE_ENDIAN (XXH_isLittleEndian())
+#endif
+
+
+/* ***************************
+* Memory reads
+*****************************/
+typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
+
+FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+ else
+ return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
+}
+
+FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
+{
+ return XXH_readLE32_align(ptr, endian, XXH_unaligned);
+}
+
+static U32 XXH_readBE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+
+
+/* *************************************
+* Macros
+***************************************/
+#define XXH_STATIC_ASSERT(c) do { enum { XXH_sa = 1/(int)(!!(c)) }; } while(0) /* use after variable declarations */
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* *******************************************************************
+* 32-bit hash functions
+*********************************************************************/
+static const U32 PRIME32_1 = 2654435761U; /* 0b10011110001101110111100110110001 */
+static const U32 PRIME32_2 = 2246822519U; /* 0b10000101111010111100101001110111 */
+static const U32 PRIME32_3 = 3266489917U; /* 0b11000010101100101010111000111101 */
+static const U32 PRIME32_4 = 668265263U; /* 0b00100111110101001110101100101111 */
+static const U32 PRIME32_5 = 374761393U; /* 0b00010110010101100110011110110001 */
+
+static U32 XXH32_round(U32 seed, U32 input)
+{
+ seed += input * PRIME32_2;
+ seed = XXH_rotl32(seed, 13);
+ seed *= PRIME32_1;
+ return seed;
+}
+
+/* mix all bits */
+static U32 XXH32_avalanche(U32 h32)
+{
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+ return h32;
+}
+
+#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
+
+static U32
+XXH32_finalize(U32 h32, const void* ptr, size_t len,
+ XXH_endianess endian, XXH_alignment align)
+
+{
+ const BYTE* p = (const BYTE*)ptr;
+
+#define PROCESS1 \
+ h32 += (*p++) * PRIME32_5; \
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1
+
+#define PROCESS4 \
+ h32 += XXH_get32bits(p) * PRIME32_3; \
+ p+=4; \
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4
+
+ switch(len&15) /* or switch(bEnd - p) */
+ {
+ case 12: PROCESS4;
+ /* fallthrough */
+ case 8: PROCESS4;
+ /* fallthrough */
+ case 4: PROCESS4;
+ return XXH32_avalanche(h32);
+
+ case 13: PROCESS4;
+ /* fallthrough */
+ case 9: PROCESS4;
+ /* fallthrough */
+ case 5: PROCESS4;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 14: PROCESS4;
+ /* fallthrough */
+ case 10: PROCESS4;
+ /* fallthrough */
+ case 6: PROCESS4;
+ PROCESS1;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 15: PROCESS4;
+ /* fallthrough */
+ case 11: PROCESS4;
+ /* fallthrough */
+ case 7: PROCESS4;
+ /* fallthrough */
+ case 3: PROCESS1;
+ /* fallthrough */
+ case 2: PROCESS1;
+ /* fallthrough */
+ case 1: PROCESS1;
+ /* fallthrough */
+ case 0: return XXH32_avalanche(h32);
+ }
+ assert(0);
+ return h32; /* reaching this point is deemed impossible */
+}
+
+
+FORCE_INLINE U32
+XXH32_endian_align(const void* input, size_t len, U32 seed,
+ XXH_endianess endian, XXH_alignment align)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* bEnd = p + len;
+ U32 h32;
+
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ if (p==NULL) {
+ len=0;
+ bEnd=p=(const BYTE*)(size_t)16;
+ }
+#endif
+
+ if (len>=16) {
+ const BYTE* const limit = bEnd - 15;
+ U32 v1 = seed + PRIME32_1 + PRIME32_2;
+ U32 v2 = seed + PRIME32_2;
+ U32 v3 = seed + 0;
+ U32 v4 = seed - PRIME32_1;
+
+ do {
+ v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
+ v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
+ v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
+ v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
+ } while (p < limit);
+
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
+ + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ } else {
+ h32 = seed + PRIME32_5;
+ }
+
+ h32 += (U32)len;
+
+ return XXH32_finalize(h32, p, len&15, endian, align);
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
+{
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH32_state_t state;
+ XXH32_reset(&state, seed);
+ XXH32_update(&state, input, len);
+ return XXH32_digest(&state);
+#else
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+ else
+ return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+ } }
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+ else
+ return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+
+
+/*====== Hash streaming ======*/
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+ return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
+{
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
+{
+ XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state));
+ state.v1 = seed + PRIME32_1 + PRIME32_2;
+ state.v2 = seed + PRIME32_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME32_1;
+ /* do not write into reserved, planned to be removed in a future version */
+ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
+ return XXH_OK;
+}
+
+
+FORCE_INLINE XXH_errorcode
+XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+ if (input==NULL)
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ return XXH_OK;
+#else
+ return XXH_ERROR;
+#endif
+
+ { const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+
+ state->total_len_32 += (unsigned)len;
+ state->large_len |= (len>=16) | (state->total_len_32>=16);
+
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
+ state->memsize += (unsigned)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* some data left from previous update */
+ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
+ { const U32* p32 = state->mem32;
+ state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
+ state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
+ state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
+ state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian));
+ }
+ p += 16-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p <= bEnd-16) {
+ const BYTE* const limit = bEnd - 16;
+ U32 v1 = state->v1;
+ U32 v2 = state->v2;
+ U32 v3 = state->v3;
+ U32 v4 = state->v4;
+
+ do {
+ v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
+ v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
+ v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
+ v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
+ else
+ return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+FORCE_INLINE U32
+XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
+{
+ U32 h32;
+
+ if (state->large_len) {
+ h32 = XXH_rotl32(state->v1, 1)
+ + XXH_rotl32(state->v2, 7)
+ + XXH_rotl32(state->v3, 12)
+ + XXH_rotl32(state->v4, 18);
+ } else {
+ h32 = state->v3 /* == seed */ + PRIME32_5;
+ }
+
+ h32 += state->total_len_32;
+
+ return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned);
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_digest_endian(state_in, XXH_littleEndian);
+ else
+ return XXH32_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/*====== Canonical representation ======*/
+
+/*! Default XXH result types are basic unsigned 32- and 64-bit integers.
+* The canonical representation follows the human-readable write convention, aka big-endian (large digits first).
+* These functions allow transformation of a hash result into and from its canonical format.
+* This way, hash values can be written into a file or buffer, remaining comparable across different systems.
+*/
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+ return XXH_readBE32(src);
+}
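+
+/* Illustrative round-trip sketch (hypothetical usage, never compiled, relying
+ * on <assert.h> included above): a hash serialized in canonical big-endian
+ * form reads back identically on any platform. */
+#if 0
+    XXH32_canonical_t canon;
+    XXH32_hash_t const h = XXH32("abc", 3, 0);
+    XXH32_canonicalFromHash(&canon, h);           /* canon.digest[] is portable */
+    assert(XXH32_hashFromCanonical(&canon) == h); /* round-trip is lossless */
+#endif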
+
+
+#ifndef XXH_NO_LONG_LONG
+
+/* *******************************************************************
+* 64-bit hash functions
+*********************************************************************/
+
+/*====== Memory access ======*/
+
+#ifndef MEM_MODULE
+# define MEM_MODULE
+# if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint64_t U64;
+# else
+ /* if compiler doesn't support unsigned long long, replace by another 64-bit type */
+ typedef unsigned long long U64;
+# endif
+#endif
+
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs that support unaligned memory access in hardware */
+static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* __pack instructions are safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
+static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+
+static U64 XXH_read64(const void* memPtr)
+{
+ U64 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_MEMORY_ACCESS */
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap64 _byteswap_uint64
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap64 __builtin_bswap64
+#else
+static U64 XXH_swap64 (U64 x)
+{
+ return ((x << 56) & 0xff00000000000000ULL) |
+ ((x << 40) & 0x00ff000000000000ULL) |
+ ((x << 24) & 0x0000ff0000000000ULL) |
+ ((x << 8) & 0x000000ff00000000ULL) |
+ ((x >> 8) & 0x00000000ff000000ULL) |
+ ((x >> 24) & 0x0000000000ff0000ULL) |
+ ((x >> 40) & 0x000000000000ff00ULL) |
+ ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+ else
+ return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
+}
+
+FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
+{
+ return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+}
+
+static U64 XXH_readBE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+
+
+/*====== xxh64 ======*/
+
+static const U64 PRIME64_1 = 11400714785074694791ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
+static const U64 PRIME64_2 = 14029467366897019727ULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
+static const U64 PRIME64_3 = 1609587929392839161ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
+static const U64 PRIME64_4 = 9650029242287828579ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
+static const U64 PRIME64_5 = 2870177450012600261ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
+
+static U64 XXH64_round(U64 acc, U64 input)
+{
+ acc += input * PRIME64_2;
+ acc = XXH_rotl64(acc, 31);
+ acc *= PRIME64_1;
+ return acc;
+}
+
+static U64 XXH64_mergeRound(U64 acc, U64 val)
+{
+ val = XXH64_round(0, val);
+ acc ^= val;
+ acc = acc * PRIME64_1 + PRIME64_4;
+ return acc;
+}
+
+static U64 XXH64_avalanche(U64 h64)
+{
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+ return h64;
+}
+
+
+#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+static U64
+XXH64_finalize(U64 h64, const void* ptr, size_t len,
+ XXH_endianess endian, XXH_alignment align)
+{
+ const BYTE* p = (const BYTE*)ptr;
+
+#define PROCESS1_64 \
+ h64 ^= (*p++) * PRIME64_5; \
+ h64 = XXH_rotl64(h64, 11) * PRIME64_1
+
+#define PROCESS4_64 \
+ h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \
+ p+=4; \
+ h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3
+
+#define PROCESS8_64 do { \
+ U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \
+ p+=8; \
+ h64 ^= k1; \
+ h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
+} while (0)
+
+ switch(len&31) {
+ case 24: PROCESS8_64;
+ /* fallthrough */
+ case 16: PROCESS8_64;
+ /* fallthrough */
+ case 8: PROCESS8_64;
+ return XXH64_avalanche(h64);
+
+ case 28: PROCESS8_64;
+ /* fallthrough */
+ case 20: PROCESS8_64;
+ /* fallthrough */
+ case 12: PROCESS8_64;
+ /* fallthrough */
+ case 4: PROCESS4_64;
+ return XXH64_avalanche(h64);
+
+ case 25: PROCESS8_64;
+ /* fallthrough */
+ case 17: PROCESS8_64;
+ /* fallthrough */
+ case 9: PROCESS8_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 29: PROCESS8_64;
+ /* fallthrough */
+ case 21: PROCESS8_64;
+ /* fallthrough */
+ case 13: PROCESS8_64;
+ /* fallthrough */
+ case 5: PROCESS4_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 26: PROCESS8_64;
+ /* fallthrough */
+ case 18: PROCESS8_64;
+ /* fallthrough */
+ case 10: PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 30: PROCESS8_64;
+ /* fallthrough */
+ case 22: PROCESS8_64;
+ /* fallthrough */
+ case 14: PROCESS8_64;
+ /* fallthrough */
+ case 6: PROCESS4_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 27: PROCESS8_64;
+ /* fallthrough */
+ case 19: PROCESS8_64;
+ /* fallthrough */
+ case 11: PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 31: PROCESS8_64;
+ /* fallthrough */
+ case 23: PROCESS8_64;
+ /* fallthrough */
+ case 15: PROCESS8_64;
+ /* fallthrough */
+ case 7: PROCESS4_64;
+ /* fallthrough */
+ case 3: PROCESS1_64;
+ /* fallthrough */
+ case 2: PROCESS1_64;
+ /* fallthrough */
+ case 1: PROCESS1_64;
+ /* fallthrough */
+ case 0: return XXH64_avalanche(h64);
+ }
+
+ /* impossible to reach */
+ assert(0);
+ return 0; /* unreachable, but some compilers complain without it */
+}
+
+FORCE_INLINE U64
+XXH64_endian_align(const void* input, size_t len, U64 seed,
+ XXH_endianess endian, XXH_alignment align)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* bEnd = p + len;
+ U64 h64;
+
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ if (p==NULL) {
+ len=0;
+ bEnd=p=(const BYTE*)(size_t)32;
+ }
+#endif
+
+ if (len>=32) {
+ const BYTE* const limit = bEnd - 32;
+ U64 v1 = seed + PRIME64_1 + PRIME64_2;
+ U64 v2 = seed + PRIME64_2;
+ U64 v3 = seed + 0;
+ U64 v4 = seed - PRIME64_1;
+
+ do {
+ v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
+ v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
+ v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
+ v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
+ } while (p<=limit);
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += (U64) len;
+
+ return XXH64_finalize(h64, p, len, endian, align);
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
+{
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH64_state_t state;
+ XXH64_reset(&state, seed);
+ XXH64_update(&state, input, len);
+ return XXH64_digest(&state);
+#else
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+ else
+ return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+ } }
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+ else
+ return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+/*====== Hash Streaming ======*/
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+ return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
+{
+ XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state));
+ state.v1 = seed + PRIME64_1 + PRIME64_2;
+ state.v2 = seed + PRIME64_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME64_1;
+ /* do not write into reserved, planned to be removed in a future version */
+ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
+ return XXH_OK;
+}
+
+FORCE_INLINE XXH_errorcode
+XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+ if (input==NULL)
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ return XXH_OK;
+#else
+ return XXH_ERROR;
+#endif
+
+ { const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+
+ state->total_len += len;
+
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+ state->memsize += (U32)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* tmp buffer is full */
+ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
+ state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
+ state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
+ state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
+ state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
+ p += 32-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p+32 <= bEnd) {
+ const BYTE* const limit = bEnd - 32;
+ U64 v1 = state->v1;
+ U64 v2 = state->v2;
+ U64 v3 = state->v3;
+ U64 v4 = state->v4;
+
+ do {
+ v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
+ v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
+ v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
+ v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
+ else
+ return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
+{
+ U64 h64;
+
+ if (state->total_len >= 32) {
+ U64 const v1 = state->v1;
+ U64 const v2 = state->v2;
+ U64 const v3 = state->v3;
+ U64 const v4 = state->v4;
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+ } else {
+ h64 = state->v3 /*seed*/ + PRIME64_5;
+ }
+
+ h64 += (U64) state->total_len;
+
+ return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned);
+}
+
+XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_digest_endian(state_in, XXH_littleEndian);
+ else
+ return XXH64_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/*====== Canonical representation ======*/
+
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
+{
+ return XXH_readBE64(src);
+}
+
+#endif /* XXH_NO_LONG_LONG */
diff --git a/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.h b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.h
new file mode 100644
index 0000000000..0de203c947
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.h
@@ -0,0 +1,341 @@
+/*
+ xxHash - Extremely Fast Hash algorithm
+ Header File
+ Copyright (C) 2012-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+/* Notice extracted from xxHash homepage :
+
+xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+It also successfully passes all tests from the SMHasher suite.
+
+Comparison (single thread, Windows 7 32-bit, using SMHasher on a Core 2 Duo @ 3 GHz)
+
+Name Speed Q.Score Author
+xxHash 5.4 GB/s 10
+CrapWow 3.2 GB/s 2 Andrew
+MurmurHash 3a 2.7 GB/s 10 Austin Appleby
+SpookyHash 2.0 GB/s 10 Bob Jenkins
+SBox 1.4 GB/s 9 Bret Mulvey
+Lookup3 1.2 GB/s 9 Bob Jenkins
+SuperFastHash 1.2 GB/s 1 Paul Hsieh
+CityHash64 1.05 GB/s 10 Pike & Alakuijala
+FNV 0.55 GB/s 5 Fowler, Noll, Vo
+CRC32 0.43 GB/s 9
+MD5-32 0.33 GB/s 10 Ronald L. Rivest
+SHA1-32 0.28 GB/s 10
+
+Q.Score is a measure of the quality of the hash function.
+It depends on successfully passing the SMHasher test set.
+10 is a perfect score.
+
+A 64-bit version, named XXH64, is available since r35.
+It offers much better speed, but for 64-bit applications only.
+Name Speed on 64 bits Speed on 32 bits
+XXH64 13.8 GB/s 1.9 GB/s
+XXH32 6.8 GB/s 6.0 GB/s
+*/
+
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* ****************************
+ * Definitions
+ ******************************/
+#include <stddef.h> /* size_t */
+typedef enum
+{
+ XXH_OK = 0,
+ XXH_ERROR
+} XXH_errorcode;
+
+/* ****************************
+ * API modifier
+ ******************************/
+/** XXH_INLINE_ALL (and XXH_PRIVATE_API)
+ * This is useful to include xxhash functions in `static` mode
+ * in order to inline them, and remove their symbol from the public list.
+ * Inlining can offer dramatic performance improvement on small keys.
+ * Methodology :
+ * #define XXH_INLINE_ALL
+ * #include "xxhash.h"
+ * `xxhash.c` is automatically included.
+ * It's not useful to compile and link it as a separate module.
+ */
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+# ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY
+# endif
+# if defined(__GNUC__)
+# define XXH_PUBLIC_API static __inline __attribute__((unused))
+# elif defined(__cplusplus) || (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
+# define XXH_PUBLIC_API static inline
+# elif defined(_MSC_VER)
+# define XXH_PUBLIC_API static __inline
+# else
+/* this version may generate warnings for unused static functions */
+# define XXH_PUBLIC_API static
+# endif
+#else
+# define XXH_PUBLIC_API /* do nothing */
+#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
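+
+/* Illustrative usage of the methodology above (a sketch): in a translation
+ * unit, define XXH_INLINE_ALL before including this header; xxhash.c then
+ * need not be compiled or linked separately.
+ *
+ *     #define XXH_INLINE_ALL
+ *     #include "xxhash.h"
+ */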
+
+/*! XXH_NAMESPACE, aka Namespace Emulation :
+ *
+ * If you want to include _and expose_ xxHash functions from within your own library,
+ * but also want to avoid symbol collisions with other libraries which may also include xxHash,
+ * you can use XXH_NAMESPACE to automatically prefix any public symbol from the xxhash library
+ * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).
+ *
+ * Note that no change is required within the calling program as long as it includes `xxhash.h` :
+ * regular symbol names will be automatically translated by this header.
+ */
+#ifdef XXH_NAMESPACE
+# define XXH_CAT(A, B) A##B
+# define XXH_NAME2(A, B) XXH_CAT(A, B)
+# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+#endif
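+
+/* Illustrative example (a sketch; MYLIB_ is a hypothetical prefix): building
+ * with -DXXH_NAMESPACE=MYLIB_ renames the public symbols, so XXH32 becomes
+ * MYLIB_XXH32 in the object file, while calling code keeps writing XXH32()
+ * and is translated by the macros above. */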
+
+/* *************************************
+ * Version
+ ***************************************/
+#define XXH_VERSION_MAJOR 0
+#define XXH_VERSION_MINOR 6
+#define XXH_VERSION_RELEASE 5
+#define XXH_VERSION_NUMBER \
+ (XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + XXH_VERSION_RELEASE)
+XXH_PUBLIC_API unsigned XXH_versionNumber(void);
+
+/*-**********************************************************************
+ * 32-bit hash
+ ************************************************************************/
+typedef unsigned int XXH32_hash_t;
+
+/*! XXH32() :
+    Calculate the 32-bit hash of the sequence of "length" bytes stored at memory address "input".
+    The memory between input & input+length must be valid (allocated and read-accessible).
+    "seed" can be used to alter the result predictably.
+    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */
+XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input, size_t length, unsigned int seed);
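+
+/* Usage sketch (illustrative, never compiled): one-shot hashing of a short string. */
+#if 0
+    const char msg[] = "hello";
+    XXH32_hash_t const h = XXH32(msg, sizeof(msg) - 1, 0 /* seed */);
+#endif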
+
+/*====== Streaming ======*/
+typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
+XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr);
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dst_state, const XXH32_state_t *src_state);
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, unsigned int seed);
+XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *statePtr,
+ const void *input,
+ size_t length);
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *statePtr);
+
+/*
+ * Streaming functions generate the xxHash of an input provided in multiple segments.
+ * Note that, for small input, they are slower than single-call functions, due to state
+ * management. For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
+ *
+ * XXH state must first be allocated, using XXH*_createState() .
+ *
+ * Start a new hash by initializing state with a seed, using XXH*_reset().
+ *
+ * Then, feed the hash state by calling XXH*_update() as many times as necessary.
+ * The function returns an error code, with 0 meaning OK, and any other value meaning there is
+ * an error.
+ *
+ * Finally, a hash value can be produced anytime, by using XXH*_digest().
+ * This function returns the nn-bit hash value as an int or long long.
+ *
+ * It's still possible to continue inserting input into the hash state after a digest,
+ * and to generate new hashes later on by calling XXH*_digest() again.
+ *
+ * When done, free XXH state space if it was allocated dynamically.
+ */
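+
+/* A minimal streaming sketch (illustrative, never compiled; `data` and `size`
+ * are assumed inputs, and error checking is elided): hashes a buffer in
+ * fixed-size chunks using the functions declared above. */
+#if 0
+static XXH32_hash_t hash_in_chunks(const void *data, size_t size, unsigned int seed)
+{
+    XXH32_state_t *const st = XXH32_createState();
+    const unsigned char *p  = (const unsigned char *)data;
+    XXH32_hash_t h;
+    XXH32_reset(st, seed);
+    while (size > 0) {
+        size_t const chunk = (size < 64) ? size : 64; /* feed at most 64 bytes per call */
+        XXH32_update(st, p, chunk);
+        p += chunk;
+        size -= chunk;
+    }
+    h = XXH32_digest(st); /* digest does not invalidate the state */
+    XXH32_freeState(st);
+    return h;
+}
+#endif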
+
+/*====== Canonical representation ======*/
+
+typedef struct
+{
+ unsigned char digest[4];
+} XXH32_canonical_t;
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst, XXH32_hash_t hash);
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t *src);
+
+/* Default result types for XXH functions are primitive unsigned 32- and 64-bit integers.
+ * The canonical representation uses the human-readable write convention, aka big-endian (large
+ * digits first). These functions allow transformation of a hash result into and from its
+ * canonical format. This way, hash values can be written into a file / memory, and remain
+ * comparable on different systems and programs.
+ */
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+ * 64-bit hash
+ ************************************************************************/
+typedef unsigned long long XXH64_hash_t;
+
+/*! XXH64() :
+    Calculate the 64-bit hash of the sequence of "length" bytes stored at memory address "input".
+    "seed" can be used to alter the result predictably.
+    This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
+*/
+XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input, size_t length, unsigned long long seed);
+
+/*====== Streaming ======*/
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr);
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dst_state, const XXH64_state_t *src_state);
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr, unsigned long long seed);
+XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *statePtr,
+ const void *input,
+ size_t length);
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *statePtr);
+
+/*====== Canonical representation ======*/
+typedef struct
+{
+ unsigned char digest[8];
+} XXH64_canonical_t;
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst, XXH64_hash_t hash);
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t *src);
+#endif /* XXH_NO_LONG_LONG */
+
+#ifdef XXH_STATIC_LINKING_ONLY
+
+/* ================================================================================================
+   This section contains declarations which are not guaranteed to remain stable.
+   They may change in future versions, becoming incompatible with a different version of the
+   library. These declarations should only be used with static linking.
+   Never use them in association with dynamic linking !
+   ================================================================================================ */
+
+/* These definitions are only present to allow
+ * static allocation of XXH state, on stack or in a struct for example.
+ * Never **ever** use members directly. */
+
+# if !defined(__VMS) && (defined(__cplusplus) || (defined(__STDC_VERSION__) && \
+ (__STDC_VERSION__ >= 199901L) /* C99 */))
+# include <stdint.h>
+
+struct XXH32_state_s
+{
+ uint32_t total_len_32;
+ uint32_t large_len;
+ uint32_t v1;
+ uint32_t v2;
+ uint32_t v3;
+ uint32_t v4;
+ uint32_t mem32[4];
+ uint32_t memsize;
+ uint32_t reserved; /* never read nor write, might be removed in a future version */
+}; /* typedef'd to XXH32_state_t */
+
+struct XXH64_state_s
+{
+ uint64_t total_len;
+ uint64_t v1;
+ uint64_t v2;
+ uint64_t v3;
+ uint64_t v4;
+ uint64_t mem64[4];
+ uint32_t memsize;
+ uint32_t reserved[2]; /* never read nor write, might be removed in a future version */
+}; /* typedef'd to XXH64_state_t */
+
+# else
+
+struct XXH32_state_s
+{
+ unsigned total_len_32;
+ unsigned large_len;
+ unsigned v1;
+ unsigned v2;
+ unsigned v3;
+ unsigned v4;
+ unsigned mem32[4];
+ unsigned memsize;
+ unsigned reserved; /* never read nor write, might be removed in a future version */
+}; /* typedef'd to XXH32_state_t */
+
+# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */
+struct XXH64_state_s
+{
+ unsigned long long total_len;
+ unsigned long long v1;
+ unsigned long long v2;
+ unsigned long long v3;
+ unsigned long long v4;
+ unsigned long long mem64[4];
+ unsigned memsize;
+ unsigned reserved[2]; /* never read nor write, might be removed in a future version */
+}; /* typedef'd to XXH64_state_t */
+# endif
+
+# endif
+
+# if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */
+# endif
+
+#endif /* XXH_STATIC_LINKING_ONLY */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* XXHASH_H_5627135585666179 */