author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /gfx/angle/checkout/src/common
parent     Initial commit. (diff)
Adding upstream version 115.7.0esr.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'gfx/angle/checkout/src/common')
-rw-r--r--  gfx/angle/checkout/src/common/CircularBuffer.h | 175
-rw-r--r--  gfx/angle/checkout/src/common/Color.h | 104
-rw-r--r--  gfx/angle/checkout/src/common/Color.inc | 69
-rw-r--r--  gfx/angle/checkout/src/common/FastVector.h | 891
-rw-r--r--  gfx/angle/checkout/src/common/FixedVector.h | 353
-rw-r--r--  gfx/angle/checkout/src/common/Float16ToFloat32.cpp | 300
-rw-r--r--  gfx/angle/checkout/src/common/MemoryBuffer.cpp | 179
-rw-r--r--  gfx/angle/checkout/src/common/MemoryBuffer.h | 93
-rw-r--r--  gfx/angle/checkout/src/common/Optional.h | 74
-rw-r--r--  gfx/angle/checkout/src/common/PackedEGLEnums_autogen.cpp | 452
-rw-r--r--  gfx/angle/checkout/src/common/PackedEGLEnums_autogen.h | 144
-rw-r--r--  gfx/angle/checkout/src/common/PackedEnums.cpp | 673
-rw-r--r--  gfx/angle/checkout/src/common/PackedEnums.h | 859
-rw-r--r--  gfx/angle/checkout/src/common/PackedGLEnums_autogen.cpp | 2449
-rw-r--r--  gfx/angle/checkout/src/common/PackedGLEnums_autogen.h | 610
-rw-r--r--  gfx/angle/checkout/src/common/PoolAlloc.cpp | 487
-rw-r--r--  gfx/angle/checkout/src/common/PoolAlloc.h | 181
-rw-r--r--  gfx/angle/checkout/src/common/Spinlock.h | 71
-rw-r--r--  gfx/angle/checkout/src/common/SynchronizedValue.h | 540
-rw-r--r--  gfx/angle/checkout/src/common/aligned_memory.cpp | 64
-rw-r--r--  gfx/angle/checkout/src/common/aligned_memory.h | 23
-rw-r--r--  gfx/angle/checkout/src/common/android_util.cpp | 424
-rw-r--r--  gfx/angle/checkout/src/common/android_util.h | 59
-rw-r--r--  gfx/angle/checkout/src/common/angle_version.h | 28
-rw-r--r--  gfx/angle/checkout/src/common/angle_version_info.cpp | 40
-rw-r--r--  gfx/angle/checkout/src/common/angle_version_info.h | 20
-rw-r--r--  gfx/angle/checkout/src/common/angleutils.cpp | 156
-rw-r--r--  gfx/angle/checkout/src/common/angleutils.h | 601
-rw-r--r--  gfx/angle/checkout/src/common/apple_platform_utils.h | 90
-rw-r--r--  gfx/angle/checkout/src/common/bitset_utils.h | 1106
-rw-r--r--  gfx/angle/checkout/src/common/debug.cpp | 349
-rw-r--r--  gfx/angle/checkout/src/common/debug.h | 468
-rw-r--r--  gfx/angle/checkout/src/common/entry_points_enum_autogen.cpp | 3454
-rw-r--r--  gfx/angle/checkout/src/common/entry_points_enum_autogen.h | 1736
-rw-r--r--  gfx/angle/checkout/src/common/event_tracer.cpp | 53
-rw-r--r--  gfx/angle/checkout/src/common/event_tracer.h | 26
-rw-r--r--  gfx/angle/checkout/src/common/hash_utils.h | 39
-rw-r--r--  gfx/angle/checkout/src/common/mathutil.cpp | 83
-rw-r--r--  gfx/angle/checkout/src/common/mathutil.h | 1482
-rw-r--r--  gfx/angle/checkout/src/common/matrix_utils.cpp | 285
-rw-r--r--  gfx/angle/checkout/src/common/matrix_utils.h | 424
-rw-r--r--  gfx/angle/checkout/src/common/platform.h | 209
-rw-r--r--  gfx/angle/checkout/src/common/spirv/spirv_types.h | 133
-rw-r--r--  gfx/angle/checkout/src/common/string_utils.cpp | 357
-rw-r--r--  gfx/angle/checkout/src/common/string_utils.h | 125
-rw-r--r--  gfx/angle/checkout/src/common/system_utils.cpp | 267
-rw-r--r--  gfx/angle/checkout/src/common/system_utils.h | 224
-rw-r--r--  gfx/angle/checkout/src/common/system_utils_apple.cpp | 59
-rw-r--r--  gfx/angle/checkout/src/common/system_utils_linux.cpp | 55
-rw-r--r--  gfx/angle/checkout/src/common/system_utils_mac.cpp | 28
-rw-r--r--  gfx/angle/checkout/src/common/system_utils_posix.cpp | 470
-rw-r--r--  gfx/angle/checkout/src/common/system_utils_win.cpp | 264
-rw-r--r--  gfx/angle/checkout/src/common/system_utils_win32.cpp | 235
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/base_export.h | 13
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/containers/mru_cache.h | 275
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/logging.h | 26
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/macros.h | 17
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/no_destructor.h | 106
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math.h | 384
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math_impl.h | 641
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math.h | 270
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math_impl.h | 368
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/math_constants.h | 20
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/ranges.h | 39
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions.h | 403
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_arm_impl.h | 60
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_impl.h | 893
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math.h | 12
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_arm_impl.h | 131
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_clang_gcc_impl.h | 182
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_shared_impl.h | 227
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.cc | 245
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.h | 36
-rw-r--r--  gfx/angle/checkout/src/common/third_party/base/anglebase/sys_byteorder.h | 49
-rw-r--r--  gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.cpp | 339
-rw-r--r--  gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.h | 57
-rw-r--r--  gfx/angle/checkout/src/common/third_party/xxhash/xxhash.c | 1030
-rw-r--r--  gfx/angle/checkout/src/common/third_party/xxhash/xxhash.h | 341
-rw-r--r--  gfx/angle/checkout/src/common/tls.cpp | 156
-rw-r--r--  gfx/angle/checkout/src/common/tls.h | 54
-rw-r--r--  gfx/angle/checkout/src/common/uniform_type_info_autogen.cpp | 378
-rw-r--r--  gfx/angle/checkout/src/common/utilities.cpp | 1509
-rw-r--r--  gfx/angle/checkout/src/common/utilities.h | 336
-rw-r--r--  gfx/angle/checkout/src/common/vector_utils.h | 523
-rw-r--r--  gfx/angle/checkout/src/common/vulkan/libvulkan_loader.cpp | 57
-rw-r--r--  gfx/angle/checkout/src/common/vulkan/libvulkan_loader.h | 23
-rw-r--r--  gfx/angle/checkout/src/common/vulkan/vk_google_filtering_precision.h | 57
-rw-r--r--  gfx/angle/checkout/src/common/vulkan/vk_headers.h | 163
-rw-r--r--  gfx/angle/checkout/src/common/vulkan/vulkan_icd.cpp | 349
-rw-r--r--  gfx/angle/checkout/src/common/vulkan/vulkan_icd.h | 72
90 files changed, 31981 insertions(+), 0 deletions(-)
diff --git a/gfx/angle/checkout/src/common/CircularBuffer.h b/gfx/angle/checkout/src/common/CircularBuffer.h
new file mode 100644
index 0000000000..3ff5f14d1b
--- /dev/null
+++ b/gfx/angle/checkout/src/common/CircularBuffer.h
@@ -0,0 +1,175 @@
+//
+// Copyright 2021 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// CircularBuffer.h:
+// An array class with an index that loops through the elements.
+//
+
+#ifndef COMMON_CIRCULARBUFFER_H_
+#define COMMON_CIRCULARBUFFER_H_
+
+#include "common/debug.h"
+
+#include <algorithm>
+#include <array>
+
+namespace angle
+{
+template <class T, size_t N, class Storage = std::array<T, N>>
+class CircularBuffer final
+{
+ public:
+ using value_type = typename Storage::value_type;
+ using size_type = typename Storage::size_type;
+ using reference = typename Storage::reference;
+ using const_reference = typename Storage::const_reference;
+ using pointer = typename Storage::pointer;
+ using const_pointer = typename Storage::const_pointer;
+ using iterator = typename Storage::iterator;
+ using const_iterator = typename Storage::const_iterator;
+
+ CircularBuffer();
+ CircularBuffer(const value_type &value);
+
+ CircularBuffer(const CircularBuffer<T, N, Storage> &other);
+ CircularBuffer(CircularBuffer<T, N, Storage> &&other);
+
+ CircularBuffer<T, N, Storage> &operator=(const CircularBuffer<T, N, Storage> &other);
+ CircularBuffer<T, N, Storage> &operator=(CircularBuffer<T, N, Storage> &&other);
+
+ ~CircularBuffer();
+
+ // begin() and end() are used to iterate over all elements regardless of the current position of
+ // the front of the buffer. Useful for initialization and clean up, as otherwise only the front
+ // element is expected to be accessed.
+ iterator begin();
+ const_iterator begin() const;
+
+ iterator end();
+ const_iterator end() const;
+
+ size_type size() const;
+
+ reference front();
+ const_reference front() const;
+
+ void swap(CircularBuffer<T, N, Storage> &other);
+
+ // Move the front forward to the next index, looping back to the beginning if the end of the
+ // array is reached.
+ void next();
+
+ private:
+ Storage mData;
+ size_type mFrontIndex;
+};
+
+template <class T, size_t N, class Storage>
+CircularBuffer<T, N, Storage>::CircularBuffer() : mFrontIndex(0)
+{}
+
+template <class T, size_t N, class Storage>
+CircularBuffer<T, N, Storage>::CircularBuffer(const value_type &value) : CircularBuffer()
+{
+ std::fill(begin(), end(), value);
+}
+
+template <class T, size_t N, class Storage>
+CircularBuffer<T, N, Storage>::CircularBuffer(const CircularBuffer<T, N, Storage> &other)
+{
+ *this = other;
+}
+
+template <class T, size_t N, class Storage>
+CircularBuffer<T, N, Storage>::CircularBuffer(CircularBuffer<T, N, Storage> &&other)
+ : CircularBuffer()
+{
+ swap(other);
+}
+
+template <class T, size_t N, class Storage>
+CircularBuffer<T, N, Storage> &CircularBuffer<T, N, Storage>::operator=(
+ const CircularBuffer<T, N, Storage> &other)
+{
+ std::copy(other.begin(), other.end(), begin());
+ mFrontIndex = other.mFrontIndex;
+ return *this;
+}
+
+template <class T, size_t N, class Storage>
+CircularBuffer<T, N, Storage> &CircularBuffer<T, N, Storage>::operator=(
+ CircularBuffer<T, N, Storage> &&other)
+{
+ swap(other);
+ return *this;
+}
+
+template <class T, size_t N, class Storage>
+CircularBuffer<T, N, Storage>::~CircularBuffer() = default;
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename CircularBuffer<T, N, Storage>::iterator CircularBuffer<T, N, Storage>::begin()
+{
+ return mData.begin();
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename CircularBuffer<T, N, Storage>::const_iterator
+CircularBuffer<T, N, Storage>::begin() const
+{
+ return mData.begin();
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename CircularBuffer<T, N, Storage>::iterator CircularBuffer<T, N, Storage>::end()
+{
+ return mData.end();
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename CircularBuffer<T, N, Storage>::const_iterator
+CircularBuffer<T, N, Storage>::end() const
+{
+ return mData.end();
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename CircularBuffer<T, N, Storage>::size_type CircularBuffer<T, N, Storage>::size()
+ const
+{
+ return N;
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename CircularBuffer<T, N, Storage>::reference
+CircularBuffer<T, N, Storage>::front()
+{
+ ASSERT(mFrontIndex < size());
+ return mData[mFrontIndex];
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename CircularBuffer<T, N, Storage>::const_reference
+CircularBuffer<T, N, Storage>::front() const
+{
+ ASSERT(mFrontIndex < size());
+ return mData[mFrontIndex];
+}
+
+template <class T, size_t N, class Storage>
+void CircularBuffer<T, N, Storage>::swap(CircularBuffer<T, N, Storage> &other)
+{
+ std::swap(mData, other.mData);
+ std::swap(mFrontIndex, other.mFrontIndex);
+}
+
+template <class T, size_t N, class Storage>
+void CircularBuffer<T, N, Storage>::next()
+{
+ mFrontIndex = (mFrontIndex + 1) % size();
+}
+} // namespace angle
+
+#endif // COMMON_CIRCULARBUFFER_H_
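
A minimal usage sketch (not part of this patch) of CircularBuffer as the comments above describe it: begin()/end() are only for whole-buffer initialization and cleanup, while steady-state code touches front() and rotates with next(). The surrounding function, the three-slot size, and the frame-slot use case are illustrative assumptions.

    #include "common/CircularBuffer.h"
    #include <cstdio>

    // Illustrative only: a three-slot ring, e.g. one slot per in-flight frame.
    void circularBufferSketch()
    {
        angle::CircularBuffer<int, 3> slots(0);

        // Initialization walks every element, regardless of where the front is.
        for (int &slot : slots)
        {
            slot = -1;
        }

        // Steady state: only front() is read/written, then next() advances the
        // front index, wrapping back to 0 after the last slot.
        for (int frame = 0; frame < 5; ++frame)
        {
            slots.front() = frame;
            printf("front slot holds %d\n", slots.front());
            slots.next();
        }
    }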
diff --git a/gfx/angle/checkout/src/common/Color.h b/gfx/angle/checkout/src/common/Color.h
new file mode 100644
index 0000000000..b228d8e8c5
--- /dev/null
+++ b/gfx/angle/checkout/src/common/Color.h
@@ -0,0 +1,104 @@
+//
+// Copyright 2016 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// Color.h : Defines the Color type used throughout the ANGLE libraries
+
+#ifndef COMMON_COLOR_H_
+#define COMMON_COLOR_H_
+
+#include <cstdint>
+
+namespace angle
+{
+
+template <typename T>
+struct Color
+{
+ Color();
+ constexpr Color(T r, T g, T b, T a);
+
+ const T *data() const { return &red; }
+ T *ptr() { return &red; }
+
+ static Color fromData(const T *data) { return Color(data[0], data[1], data[2], data[3]); }
+ void writeData(T *data) const
+ {
+ data[0] = red;
+ data[1] = green;
+ data[2] = blue;
+ data[3] = alpha;
+ }
+
+ T red;
+ T green;
+ T blue;
+ T alpha;
+};
+
+template <typename T>
+bool operator==(const Color<T> &a, const Color<T> &b);
+
+template <typename T>
+bool operator!=(const Color<T> &a, const Color<T> &b);
+
+typedef Color<float> ColorF;
+typedef Color<int> ColorI;
+typedef Color<unsigned int> ColorUI;
+
+struct ColorGeneric
+{
+ inline ColorGeneric();
+ inline ColorGeneric(const ColorF &color);
+ inline ColorGeneric(const ColorI &color);
+ inline ColorGeneric(const ColorUI &color);
+
+ enum class Type : uint8_t
+ {
+ Float = 0,
+ Int = 1,
+ UInt = 2
+ };
+
+ union
+ {
+ ColorF colorF;
+ ColorI colorI;
+ ColorUI colorUI;
+ };
+
+ Type type;
+};
+
+inline bool operator==(const ColorGeneric &a, const ColorGeneric &b);
+
+inline bool operator!=(const ColorGeneric &a, const ColorGeneric &b);
+
+struct DepthStencil
+{
+ DepthStencil() : depth(0), stencil(0) {}
+
+ // Double is needed to represent the 32-bit integer range of GL_DEPTH_COMPONENT32.
+ double depth;
+ uint32_t stencil;
+};
+} // namespace angle
+
+// TODO: Move this fully into the angle namespace
+namespace gl
+{
+
+template <typename T>
+using Color = angle::Color<T>;
+using ColorF = angle::ColorF;
+using ColorI = angle::ColorI;
+using ColorUI = angle::ColorUI;
+using ColorGeneric = angle::ColorGeneric;
+
+} // namespace gl
+
+#include "Color.inc"
+
+#endif // COMMON_COLOR_H_
diff --git a/gfx/angle/checkout/src/common/Color.inc b/gfx/angle/checkout/src/common/Color.inc
new file mode 100644
index 0000000000..0e1445111b
--- /dev/null
+++ b/gfx/angle/checkout/src/common/Color.inc
@@ -0,0 +1,69 @@
+//
+// Copyright 2016 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// Color.inc : Inline definitions of some functions from Color.h
+
+namespace angle
+{
+
+template <typename T>
+Color<T>::Color() : Color(0, 0, 0, 0)
+{
+}
+
+template <typename T>
+constexpr Color<T>::Color(T r, T g, T b, T a) : red(r), green(g), blue(b), alpha(a)
+{
+}
+
+template <typename T>
+bool operator==(const Color<T> &a, const Color<T> &b)
+{
+ return a.red == b.red &&
+ a.green == b.green &&
+ a.blue == b.blue &&
+ a.alpha == b.alpha;
+}
+
+template <typename T>
+bool operator!=(const Color<T> &a, const Color<T> &b)
+{
+ return !(a == b);
+}
+
+
+ColorGeneric::ColorGeneric() : colorF(), type(Type::Float) {}
+
+ColorGeneric::ColorGeneric(const ColorF &color) : colorF(color), type(Type::Float) {}
+
+ColorGeneric::ColorGeneric(const ColorI &color) : colorI(color), type(Type::Int) {}
+
+ColorGeneric::ColorGeneric(const ColorUI &color) : colorUI(color), type(Type::UInt) {}
+
+bool operator==(const ColorGeneric &a, const ColorGeneric &b)
+{
+ if (a.type != b.type)
+ {
+ return false;
+ }
+ switch (a.type)
+ {
+ default:
+ case ColorGeneric::Type::Float:
+ return a.colorF == b.colorF;
+ case ColorGeneric::Type::Int:
+ return a.colorI == b.colorI;
+ case ColorGeneric::Type::UInt:
+ return a.colorUI == b.colorUI;
+ }
+}
+
+bool operator!=(const ColorGeneric &a, const ColorGeneric &b)
+{
+ return !(a == b);
+}
+
+} // namespace angle
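
ColorGeneric above is a small tagged union: one of ColorF/ColorI/ColorUI plus a Type tag, and operator== compares the tag first and then only the active member. A brief sketch (not part of this patch; the function name is illustrative):

    #include "common/Color.h"

    // Illustrative only: two clear colors with different tags never compare equal,
    // even though both encode opaque black, because the Type tags differ.
    bool clearColorsMatch()
    {
        angle::ColorGeneric floatBlack(angle::ColorF(0.0f, 0.0f, 0.0f, 1.0f));
        angle::ColorGeneric uintBlack(angle::ColorUI(0u, 0u, 0u, 255u));
        return floatBlack == uintBlack;  // false: Type::Float vs Type::UInt
    }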
diff --git a/gfx/angle/checkout/src/common/FastVector.h b/gfx/angle/checkout/src/common/FastVector.h
new file mode 100644
index 0000000000..6991adf715
--- /dev/null
+++ b/gfx/angle/checkout/src/common/FastVector.h
@@ -0,0 +1,891 @@
+//
+// Copyright 2018 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// FastVector.h:
+// A vector class with an initial fixed size and variable growth.
+// Based on FixedVector.
+//
+
+#ifndef COMMON_FASTVECTOR_H_
+#define COMMON_FASTVECTOR_H_
+
+#include "bitset_utils.h"
+#include "common/debug.h"
+
+#include <algorithm>
+#include <array>
+#include <initializer_list>
+#include <iterator>
+
+namespace angle
+{
+
+template <class Iter>
+class WrapIter
+{
+ public:
+ typedef Iter iterator_type;
+ typedef typename std::iterator_traits<iterator_type>::value_type value_type;
+ typedef typename std::iterator_traits<iterator_type>::difference_type difference_type;
+ typedef typename std::iterator_traits<iterator_type>::pointer pointer;
+ typedef typename std::iterator_traits<iterator_type>::reference reference;
+ typedef typename std::iterator_traits<iterator_type>::iterator_category iterator_category;
+
+ WrapIter() : mIter() {}
+ WrapIter(const Iter &iter) : mIter(iter) {}
+ ~WrapIter() = default;
+
+ WrapIter &operator=(const WrapIter &x)
+ {
+ mIter = x.mIter;
+ return *this;
+ }
+
+ bool operator==(const WrapIter &x) const { return mIter == x.mIter; }
+ bool operator!=(const WrapIter &x) const { return mIter != x.mIter; }
+ bool operator<(const WrapIter &x) const { return mIter < x.mIter; }
+ bool operator<=(const WrapIter &x) const { return mIter <= x.mIter; }
+ bool operator>(const WrapIter &x) const { return mIter > x.mIter; }
+ bool operator>=(const WrapIter &x) const { return mIter >= x.mIter; }
+
+ WrapIter &operator++()
+ {
+ mIter++;
+ return *this;
+ }
+
+ WrapIter operator++(int)
+ {
+ WrapIter tmp(mIter);
+ mIter++;
+ return tmp;
+ }
+
+ WrapIter operator+(difference_type n)
+ {
+ WrapIter tmp(mIter);
+ tmp.mIter += n;
+ return tmp;
+ }
+
+ WrapIter operator-(difference_type n)
+ {
+ WrapIter tmp(mIter);
+ tmp.mIter -= n;
+ return tmp;
+ }
+
+ difference_type operator-(const WrapIter &x) const { return mIter - x.mIter; }
+
+ iterator_type operator->() const { return mIter; }
+
+ reference operator*() const { return *mIter; }
+
+ private:
+ iterator_type mIter;
+};
+
+template <class T, size_t N, class Storage = std::array<T, N>>
+class FastVector final
+{
+ public:
+ using value_type = typename Storage::value_type;
+ using size_type = typename Storage::size_type;
+ using reference = typename Storage::reference;
+ using const_reference = typename Storage::const_reference;
+ using pointer = typename Storage::pointer;
+ using const_pointer = typename Storage::const_pointer;
+ using iterator = WrapIter<T *>;
+ using const_iterator = WrapIter<const T *>;
+
+ FastVector();
+ FastVector(size_type count, const value_type &value);
+ FastVector(size_type count);
+
+ FastVector(const FastVector<T, N, Storage> &other);
+ FastVector(FastVector<T, N, Storage> &&other);
+ FastVector(std::initializer_list<value_type> init);
+
+ template <class InputIt, std::enable_if_t<!std::is_integral<InputIt>::value, bool> = true>
+ FastVector(InputIt first, InputIt last);
+
+ FastVector<T, N, Storage> &operator=(const FastVector<T, N, Storage> &other);
+ FastVector<T, N, Storage> &operator=(FastVector<T, N, Storage> &&other);
+ FastVector<T, N, Storage> &operator=(std::initializer_list<value_type> init);
+
+ ~FastVector();
+
+ reference at(size_type pos);
+ const_reference at(size_type pos) const;
+
+ reference operator[](size_type pos);
+ const_reference operator[](size_type pos) const;
+
+ pointer data();
+ const_pointer data() const;
+
+ iterator begin();
+ const_iterator begin() const;
+
+ iterator end();
+ const_iterator end() const;
+
+ bool empty() const;
+ size_type size() const;
+
+ void clear();
+
+ void push_back(const value_type &value);
+ void push_back(value_type &&value);
+
+ template <typename... Args>
+ void emplace_back(Args &&...args);
+
+ void pop_back();
+
+ reference front();
+ const_reference front() const;
+
+ reference back();
+ const_reference back() const;
+
+ void swap(FastVector<T, N, Storage> &other);
+
+ void resize(size_type count);
+ void resize(size_type count, const value_type &value);
+
+ void reserve(size_type count);
+
+ // Specialty function that removes a known element and might shuffle the list.
+ void remove_and_permute(const value_type &element);
+ void remove_and_permute(iterator pos);
+
+ private:
+ void assign_from_initializer_list(std::initializer_list<value_type> init);
+ void ensure_capacity(size_t capacity);
+ bool uses_fixed_storage() const;
+
+ Storage mFixedStorage;
+ pointer mData = mFixedStorage.data();
+ size_type mSize = 0;
+ size_type mReservedSize = N;
+};
+
+template <class T, size_t N, class StorageN, size_t M, class StorageM>
+bool operator==(const FastVector<T, N, StorageN> &a, const FastVector<T, M, StorageM> &b)
+{
+ return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
+}
+
+template <class T, size_t N, class StorageN, size_t M, class StorageM>
+bool operator!=(const FastVector<T, N, StorageN> &a, const FastVector<T, M, StorageM> &b)
+{
+ return !(a == b);
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE bool FastVector<T, N, Storage>::uses_fixed_storage() const
+{
+ return mData == mFixedStorage.data();
+}
+
+template <class T, size_t N, class Storage>
+FastVector<T, N, Storage>::FastVector()
+{}
+
+template <class T, size_t N, class Storage>
+FastVector<T, N, Storage>::FastVector(size_type count, const value_type &value)
+{
+ ensure_capacity(count);
+ mSize = count;
+ std::fill(begin(), end(), value);
+}
+
+template <class T, size_t N, class Storage>
+FastVector<T, N, Storage>::FastVector(size_type count)
+{
+ ensure_capacity(count);
+ mSize = count;
+}
+
+template <class T, size_t N, class Storage>
+FastVector<T, N, Storage>::FastVector(const FastVector<T, N, Storage> &other)
+ : FastVector(other.begin(), other.end())
+{}
+
+template <class T, size_t N, class Storage>
+FastVector<T, N, Storage>::FastVector(FastVector<T, N, Storage> &&other) : FastVector()
+{
+ swap(other);
+}
+
+template <class T, size_t N, class Storage>
+FastVector<T, N, Storage>::FastVector(std::initializer_list<value_type> init)
+{
+ assign_from_initializer_list(init);
+}
+
+template <class T, size_t N, class Storage>
+template <class InputIt, std::enable_if_t<!std::is_integral<InputIt>::value, bool>>
+FastVector<T, N, Storage>::FastVector(InputIt first, InputIt last)
+{
+ size_t newSize = last - first;
+ ensure_capacity(newSize);
+ mSize = newSize;
+ std::copy(first, last, begin());
+}
+
+template <class T, size_t N, class Storage>
+FastVector<T, N, Storage> &FastVector<T, N, Storage>::operator=(
+ const FastVector<T, N, Storage> &other)
+{
+ ensure_capacity(other.mSize);
+ mSize = other.mSize;
+ std::copy(other.begin(), other.end(), begin());
+ return *this;
+}
+
+template <class T, size_t N, class Storage>
+FastVector<T, N, Storage> &FastVector<T, N, Storage>::operator=(FastVector<T, N, Storage> &&other)
+{
+ swap(other);
+ return *this;
+}
+
+template <class T, size_t N, class Storage>
+FastVector<T, N, Storage> &FastVector<T, N, Storage>::operator=(
+ std::initializer_list<value_type> init)
+{
+ assign_from_initializer_list(init);
+ return *this;
+}
+
+template <class T, size_t N, class Storage>
+FastVector<T, N, Storage>::~FastVector()
+{
+ clear();
+ if (!uses_fixed_storage())
+ {
+ delete[] mData;
+ }
+}
+
+template <class T, size_t N, class Storage>
+typename FastVector<T, N, Storage>::reference FastVector<T, N, Storage>::at(size_type pos)
+{
+ ASSERT(pos < mSize);
+ return mData[pos];
+}
+
+template <class T, size_t N, class Storage>
+typename FastVector<T, N, Storage>::const_reference FastVector<T, N, Storage>::at(
+ size_type pos) const
+{
+ ASSERT(pos < mSize);
+ return mData[pos];
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::reference FastVector<T, N, Storage>::operator[](
+ size_type pos)
+{
+ ASSERT(pos < mSize);
+ return mData[pos];
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::const_reference
+FastVector<T, N, Storage>::operator[](size_type pos) const
+{
+ ASSERT(pos < mSize);
+ return mData[pos];
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::const_pointer
+angle::FastVector<T, N, Storage>::data() const
+{
+ return mData;
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::pointer angle::FastVector<T, N, Storage>::data()
+{
+ return mData;
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::iterator FastVector<T, N, Storage>::begin()
+{
+ return mData;
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::const_iterator FastVector<T, N, Storage>::begin()
+ const
+{
+ return mData;
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::iterator FastVector<T, N, Storage>::end()
+{
+ return mData + mSize;
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::const_iterator FastVector<T, N, Storage>::end()
+ const
+{
+ return mData + mSize;
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE bool FastVector<T, N, Storage>::empty() const
+{
+ return mSize == 0;
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::size_type FastVector<T, N, Storage>::size() const
+{
+ return mSize;
+}
+
+template <class T, size_t N, class Storage>
+void FastVector<T, N, Storage>::clear()
+{
+ resize(0);
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE void FastVector<T, N, Storage>::push_back(const value_type &value)
+{
+ if (mSize == mReservedSize)
+ ensure_capacity(mSize + 1);
+ mData[mSize++] = value;
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE void FastVector<T, N, Storage>::push_back(value_type &&value)
+{
+ emplace_back(std::move(value));
+}
+
+template <class T, size_t N, class Storage>
+template <typename... Args>
+ANGLE_INLINE void FastVector<T, N, Storage>::emplace_back(Args &&...args)
+{
+ if (mSize == mReservedSize)
+ ensure_capacity(mSize + 1);
+ mData[mSize++] = std::move(T(std::forward<Args>(args)...));
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE void FastVector<T, N, Storage>::pop_back()
+{
+ ASSERT(mSize > 0);
+ mSize--;
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::reference FastVector<T, N, Storage>::front()
+{
+ ASSERT(mSize > 0);
+ return mData[0];
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::const_reference FastVector<T, N, Storage>::front()
+ const
+{
+ ASSERT(mSize > 0);
+ return mData[0];
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::reference FastVector<T, N, Storage>::back()
+{
+ ASSERT(mSize > 0);
+ return mData[mSize - 1];
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE typename FastVector<T, N, Storage>::const_reference FastVector<T, N, Storage>::back()
+ const
+{
+ ASSERT(mSize > 0);
+ return mData[mSize - 1];
+}
+
+template <class T, size_t N, class Storage>
+void FastVector<T, N, Storage>::swap(FastVector<T, N, Storage> &other)
+{
+ std::swap(mSize, other.mSize);
+
+ pointer tempData = other.mData;
+ if (uses_fixed_storage())
+ other.mData = other.mFixedStorage.data();
+ else
+ other.mData = mData;
+ if (tempData == other.mFixedStorage.data())
+ mData = mFixedStorage.data();
+ else
+ mData = tempData;
+ std::swap(mReservedSize, other.mReservedSize);
+
+ if (uses_fixed_storage() || other.uses_fixed_storage())
+ std::swap(mFixedStorage, other.mFixedStorage);
+}
+
+template <class T, size_t N, class Storage>
+void FastVector<T, N, Storage>::resize(size_type count)
+{
+ if (count > mSize)
+ {
+ ensure_capacity(count);
+ }
+ mSize = count;
+}
+
+template <class T, size_t N, class Storage>
+void FastVector<T, N, Storage>::resize(size_type count, const value_type &value)
+{
+ if (count > mSize)
+ {
+ ensure_capacity(count);
+ std::fill(mData + mSize, mData + count, value);
+ }
+ mSize = count;
+}
+
+template <class T, size_t N, class Storage>
+void FastVector<T, N, Storage>::reserve(size_type count)
+{
+ ensure_capacity(count);
+}
+
+template <class T, size_t N, class Storage>
+void FastVector<T, N, Storage>::assign_from_initializer_list(std::initializer_list<value_type> init)
+{
+ ensure_capacity(init.size());
+ mSize = init.size();
+ size_t index = 0;
+ for (auto &value : init)
+ {
+ mData[index++] = value;
+ }
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE void FastVector<T, N, Storage>::remove_and_permute(const value_type &element)
+{
+ size_t len = mSize - 1;
+ for (size_t index = 0; index < len; ++index)
+ {
+ if (mData[index] == element)
+ {
+ mData[index] = std::move(mData[len]);
+ break;
+ }
+ }
+ pop_back();
+}
+
+template <class T, size_t N, class Storage>
+ANGLE_INLINE void FastVector<T, N, Storage>::remove_and_permute(iterator pos)
+{
+ ASSERT(pos >= begin());
+ ASSERT(pos < end());
+ size_t len = mSize - 1;
+ *pos = std::move(mData[len]);
+ pop_back();
+}
+
+template <class T, size_t N, class Storage>
+void FastVector<T, N, Storage>::ensure_capacity(size_t capacity)
+{
+ // We have a minimum capacity of N.
+ if (mReservedSize < capacity)
+ {
+ ASSERT(capacity > N);
+ size_type newSize = std::max(mReservedSize, N);
+ while (newSize < capacity)
+ {
+ newSize *= 2;
+ }
+
+ pointer newData = new value_type[newSize];
+
+ if (mSize > 0)
+ {
+ std::move(begin(), end(), newData);
+ }
+
+ if (!uses_fixed_storage())
+ {
+ delete[] mData;
+ }
+
+ mData = newData;
+ mReservedSize = newSize;
+ }
+}
+
+template <class Value, size_t N>
+class FastMap final
+{
+ public:
+ FastMap() {}
+ ~FastMap() {}
+
+ Value &operator[](uint32_t key)
+ {
+ if (mData.size() <= key)
+ {
+ mData.resize(key + 1, {});
+ }
+ return mData[key];
+ }
+
+ const Value &operator[](uint32_t key) const
+ {
+ ASSERT(key < mData.size());
+ return mData[key];
+ }
+
+ void clear() { mData.clear(); }
+
+ bool empty() const { return mData.empty(); }
+ size_t size() const { return mData.size(); }
+
+ const Value *data() const { return mData.data(); }
+
+ bool operator==(const FastMap<Value, N> &other) const
+ {
+ return (size() == other.size()) &&
+ (memcmp(data(), other.data(), size() * sizeof(Value)) == 0);
+ }
+
+ private:
+ FastVector<Value, N> mData;
+};
+
+template <class Key, class Value, size_t N>
+class FlatUnorderedMap final
+{
+ public:
+ using Pair = std::pair<Key, Value>;
+ using Storage = FastVector<Pair, N>;
+ using iterator = typename Storage::iterator;
+ using const_iterator = typename Storage::const_iterator;
+
+ FlatUnorderedMap() = default;
+ ~FlatUnorderedMap() = default;
+
+ iterator begin() { return mData.begin(); }
+ const_iterator begin() const { return mData.begin(); }
+ iterator end() { return mData.end(); }
+ const_iterator end() const { return mData.end(); }
+
+ iterator find(const Key &key)
+ {
+ for (auto it = mData.begin(); it != mData.end(); ++it)
+ {
+ if (it->first == key)
+ {
+ return it;
+ }
+ }
+ return mData.end();
+ }
+
+ const_iterator find(const Key &key) const
+ {
+ for (auto it = mData.begin(); it != mData.end(); ++it)
+ {
+ if (it->first == key)
+ {
+ return it;
+ }
+ }
+ return mData.end();
+ }
+
+ Value &operator[](const Key &key)
+ {
+ iterator it = find(key);
+ if (it != end())
+ {
+ return it->second;
+ }
+
+ mData.push_back(Pair(key, {}));
+ return mData.back().second;
+ }
+
+ void insert(Pair pair)
+ {
+ ASSERT(!contains(pair.first));
+ mData.push_back(std::move(pair));
+ }
+
+ void insert(const Key &key, Value value) { insert(Pair(key, value)); }
+
+ void erase(iterator pos) { mData.remove_and_permute(pos); }
+
+ bool contains(const Key &key) const { return find(key) != end(); }
+
+ void clear() { mData.clear(); }
+
+ bool get(const Key &key, Value *value) const
+ {
+ auto it = find(key);
+ if (it != end())
+ {
+ *value = it->second;
+ return true;
+ }
+ return false;
+ }
+
+ bool empty() const { return mData.empty(); }
+ size_t size() const { return mData.size(); }
+
+ private:
+ FastVector<Pair, N> mData;
+};
+
+template <class T, size_t N>
+class FlatUnorderedSet final
+{
+ public:
+ using Storage = FastVector<T, N>;
+ using iterator = typename Storage::iterator;
+ using const_iterator = typename Storage::const_iterator;
+
+ FlatUnorderedSet() = default;
+ ~FlatUnorderedSet() = default;
+
+ iterator begin() { return mData.begin(); }
+ const_iterator begin() const { return mData.begin(); }
+ iterator end() { return mData.end(); }
+ const_iterator end() const { return mData.end(); }
+
+ iterator find(const T &value)
+ {
+ for (auto it = mData.begin(); it != mData.end(); ++it)
+ {
+ if (*it == value)
+ {
+ return it;
+ }
+ }
+ return mData.end();
+ }
+
+ const_iterator find(const T &value) const
+ {
+ for (auto it = mData.begin(); it != mData.end(); ++it)
+ {
+ if (*it == value)
+ {
+ return it;
+ }
+ }
+ return mData.end();
+ }
+
+ bool empty() const { return mData.empty(); }
+
+ void insert(const T &value)
+ {
+ ASSERT(!contains(value));
+ mData.push_back(value);
+ }
+
+ void erase(const T &value)
+ {
+ ASSERT(contains(value));
+ mData.remove_and_permute(value);
+ }
+
+ void remove(const T &value) { erase(value); }
+
+ bool contains(const T &value) const { return find(value) != end(); }
+
+ void clear() { mData.clear(); }
+
+ bool operator==(const FlatUnorderedSet<T, N> &other) const { return mData == other.mData; }
+
+ private:
+ Storage mData;
+};
+
+class FastIntegerSet final
+{
+ public:
+ static constexpr size_t kWindowSize = 64;
+ static constexpr size_t kOneLessThanKWindowSize = kWindowSize - 1;
+ static constexpr size_t kShiftForDivision =
+ static_cast<size_t>(rx::Log2(static_cast<unsigned int>(kWindowSize)));
+ using KeyBitSet = angle::BitSet64<kWindowSize>;
+
+ ANGLE_INLINE FastIntegerSet();
+ ANGLE_INLINE ~FastIntegerSet();
+
+ ANGLE_INLINE void ensureCapacity(size_t size)
+ {
+ if (capacity() <= size)
+ {
+ reserve(size * 2);
+ }
+ }
+
+ ANGLE_INLINE void insert(uint64_t key)
+ {
+ size_t sizedKey = static_cast<size_t>(key);
+
+ ASSERT(!contains(sizedKey));
+ ensureCapacity(sizedKey);
+ ASSERT(capacity() > sizedKey);
+
+ size_t index = sizedKey >> kShiftForDivision;
+ size_t offset = sizedKey & kOneLessThanKWindowSize;
+
+ mKeyData[index].set(offset, true);
+ }
+
+ ANGLE_INLINE bool contains(uint64_t key) const
+ {
+ size_t sizedKey = static_cast<size_t>(key);
+
+ size_t index = sizedKey >> kShiftForDivision;
+ size_t offset = sizedKey & kOneLessThanKWindowSize;
+
+ return (sizedKey < capacity()) && (mKeyData[index].test(offset));
+ }
+
+ ANGLE_INLINE void clear()
+ {
+ for (KeyBitSet &it : mKeyData)
+ {
+ it.reset();
+ }
+ }
+
+ ANGLE_INLINE bool empty() const
+ {
+ for (const KeyBitSet &it : mKeyData)
+ {
+ if (it.any())
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ ANGLE_INLINE size_t size() const
+ {
+ size_t valid_entries = 0;
+ for (const KeyBitSet &it : mKeyData)
+ {
+ valid_entries += it.count();
+ }
+ return valid_entries;
+ }
+
+ private:
+ ANGLE_INLINE size_t capacity() const { return kWindowSize * mKeyData.size(); }
+
+ ANGLE_INLINE void reserve(size_t newSize)
+ {
+ size_t alignedSize = rx::roundUpPow2(newSize, kWindowSize);
+ size_t count = alignedSize >> kShiftForDivision;
+
+ mKeyData.resize(count, KeyBitSet::Zero());
+ }
+
+ std::vector<KeyBitSet> mKeyData;
+};
+
+// This is needed to accommodate the chromium style guide error -
+// [chromium-style] Complex constructor has an inlined body.
+ANGLE_INLINE FastIntegerSet::FastIntegerSet() {}
+ANGLE_INLINE FastIntegerSet::~FastIntegerSet() {}
+
+template <typename Value>
+class FastIntegerMap final
+{
+ public:
+ FastIntegerMap() {}
+ ~FastIntegerMap() {}
+
+ ANGLE_INLINE void ensureCapacity(size_t size)
+ {
+ // Ensure key set has capacity
+ mKeySet.ensureCapacity(size);
+
+ // Ensure value vector has capacity
+ ensureCapacityImpl(size);
+ }
+
+ ANGLE_INLINE void insert(uint64_t key, Value value)
+ {
+ // Insert key
+ ASSERT(!mKeySet.contains(key));
+ mKeySet.insert(key);
+
+ // Insert value
+ size_t sizedKey = static_cast<size_t>(key);
+ ensureCapacityImpl(sizedKey);
+ ASSERT(capacity() > sizedKey);
+ mValueData[sizedKey] = value;
+ }
+
+ ANGLE_INLINE bool contains(uint64_t key) const { return mKeySet.contains(key); }
+
+ ANGLE_INLINE bool get(uint64_t key, Value *out) const
+ {
+ if (!mKeySet.contains(key))
+ {
+ return false;
+ }
+
+ size_t sizedKey = static_cast<size_t>(key);
+ *out = mValueData[sizedKey];
+ return true;
+ }
+
+ ANGLE_INLINE void clear() { mKeySet.clear(); }
+
+ ANGLE_INLINE bool empty() const { return mKeySet.empty(); }
+
+ ANGLE_INLINE size_t size() const { return mKeySet.size(); }
+
+ private:
+ ANGLE_INLINE size_t capacity() const { return mValueData.size(); }
+
+ ANGLE_INLINE void ensureCapacityImpl(size_t size)
+ {
+ if (capacity() <= size)
+ {
+ reserve(size * 2);
+ }
+ }
+
+ ANGLE_INLINE void reserve(size_t newSize)
+ {
+ size_t alignedSize = rx::roundUpPow2(newSize, FastIntegerSet::kWindowSize);
+ mValueData.resize(alignedSize);
+ }
+
+ FastIntegerSet mKeySet;
+ std::vector<Value> mValueData;
+};
+} // namespace angle
+
+#endif // COMMON_FASTVECTOR_H_
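
FastVector above keeps up to N elements in inline storage and only moves to a heap allocation, doubling capacity, once a push would exceed N; remove_and_permute trades ordering for O(1) removal, and FlatUnorderedMap is a linear-scan map over a FastVector of pairs. A short sketch (not part of this patch; the function name and sizes are illustrative):

    #include "common/FastVector.h"

    void fastVectorSketch()
    {
        // Inline storage for 4 ints; nothing is heap-allocated yet.
        angle::FastVector<int, 4> values;
        for (int i = 0; i < 4; ++i)
        {
            values.push_back(i);
        }

        // The fifth push exceeds N == 4, so ensure_capacity() moves the elements
        // into a heap buffer with doubled capacity.
        values.push_back(4);

        // O(1) removal: the last element is moved into the removed slot, so the
        // relative order of the remaining elements is not preserved.
        values.remove_and_permute(2);

        // FlatUnorderedMap does a linear find() over its pairs; it is intended
        // for very small key counts where that beats hashing.
        angle::FlatUnorderedMap<int, int, 4> squares;
        squares.insert(3, 9);
        int nine  = 0;
        bool seen = squares.get(3, &nine);
        (void)seen;
    }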
diff --git a/gfx/angle/checkout/src/common/FixedVector.h b/gfx/angle/checkout/src/common/FixedVector.h
new file mode 100644
index 0000000000..bff87fc969
--- /dev/null
+++ b/gfx/angle/checkout/src/common/FixedVector.h
@@ -0,0 +1,353 @@
+//
+// Copyright 2018 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// FixedVector.h:
+// A vector class with a maximum size and fixed storage.
+//
+
+#ifndef COMMON_FIXEDVECTOR_H_
+#define COMMON_FIXEDVECTOR_H_
+
+#include "common/debug.h"
+
+#include <algorithm>
+#include <array>
+#include <initializer_list>
+
+namespace angle
+{
+template <class T, size_t N, class Storage = std::array<T, N>>
+class FixedVector final
+{
+ public:
+ using value_type = typename Storage::value_type;
+ using size_type = typename Storage::size_type;
+ using reference = typename Storage::reference;
+ using const_reference = typename Storage::const_reference;
+ using pointer = typename Storage::pointer;
+ using const_pointer = typename Storage::const_pointer;
+ using iterator = typename Storage::iterator;
+ using const_iterator = typename Storage::const_iterator;
+ using reverse_iterator = typename Storage::reverse_iterator;
+ using const_reverse_iterator = typename Storage::const_reverse_iterator;
+
+ FixedVector();
+ FixedVector(size_type count, const value_type &value);
+ FixedVector(size_type count);
+
+ FixedVector(const FixedVector<T, N, Storage> &other);
+ FixedVector(FixedVector<T, N, Storage> &&other);
+ FixedVector(std::initializer_list<value_type> init);
+
+ FixedVector<T, N, Storage> &operator=(const FixedVector<T, N, Storage> &other);
+ FixedVector<T, N, Storage> &operator=(FixedVector<T, N, Storage> &&other);
+ FixedVector<T, N, Storage> &operator=(std::initializer_list<value_type> init);
+
+ ~FixedVector();
+
+ reference at(size_type pos);
+ const_reference at(size_type pos) const;
+
+ reference operator[](size_type pos);
+ const_reference operator[](size_type pos) const;
+
+ pointer data();
+ const_pointer data() const;
+
+ iterator begin();
+ const_iterator begin() const;
+
+ iterator end();
+ const_iterator end() const;
+
+ bool empty() const;
+ size_type size() const;
+ static constexpr size_type max_size();
+
+ void clear();
+
+ void push_back(const value_type &value);
+ void push_back(value_type &&value);
+
+ template <class... Args>
+ void emplace_back(Args &&... args);
+
+ void pop_back();
+ reference back();
+ const_reference back() const;
+
+ void swap(FixedVector<T, N, Storage> &other);
+
+ void resize(size_type count);
+ void resize(size_type count, const value_type &value);
+
+ bool full() const;
+
+ private:
+ void assign_from_initializer_list(std::initializer_list<value_type> init);
+
+ Storage mStorage;
+ size_type mSize = 0;
+};
+
+template <class T, size_t N, class Storage>
+bool operator==(const FixedVector<T, N, Storage> &a, const FixedVector<T, N, Storage> &b)
+{
+ return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
+}
+
+template <class T, size_t N, class Storage>
+bool operator!=(const FixedVector<T, N, Storage> &a, const FixedVector<T, N, Storage> &b)
+{
+ return !(a == b);
+}
+
+template <class T, size_t N, class Storage>
+FixedVector<T, N, Storage>::FixedVector() = default;
+
+template <class T, size_t N, class Storage>
+FixedVector<T, N, Storage>::FixedVector(size_type count, const value_type &value) : mSize(count)
+{
+ ASSERT(count <= N);
+ std::fill(mStorage.begin(), mStorage.begin() + count, value);
+}
+
+template <class T, size_t N, class Storage>
+FixedVector<T, N, Storage>::FixedVector(size_type count) : mSize(count)
+{
+ ASSERT(count <= N);
+}
+
+template <class T, size_t N, class Storage>
+FixedVector<T, N, Storage>::FixedVector(const FixedVector<T, N, Storage> &other) = default;
+
+template <class T, size_t N, class Storage>
+FixedVector<T, N, Storage>::FixedVector(FixedVector<T, N, Storage> &&other) = default;
+
+template <class T, size_t N, class Storage>
+FixedVector<T, N, Storage>::FixedVector(std::initializer_list<value_type> init)
+{
+ ASSERT(init.size() <= N);
+ assign_from_initializer_list(init);
+}
+
+template <class T, size_t N, class Storage>
+FixedVector<T, N, Storage> &FixedVector<T, N, Storage>::operator=(
+ const FixedVector<T, N, Storage> &other) = default;
+
+template <class T, size_t N, class Storage>
+FixedVector<T, N, Storage> &FixedVector<T, N, Storage>::operator=(
+ FixedVector<T, N, Storage> &&other) = default;
+
+template <class T, size_t N, class Storage>
+FixedVector<T, N, Storage> &FixedVector<T, N, Storage>::operator=(
+ std::initializer_list<value_type> init)
+{
+ clear();
+ ASSERT(init.size() <= N);
+ assign_from_initializer_list(init);
+ return *this;
+}
+
+template <class T, size_t N, class Storage>
+FixedVector<T, N, Storage>::~FixedVector()
+{
+ clear();
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::reference FixedVector<T, N, Storage>::at(size_type pos)
+{
+ ASSERT(pos < N);
+ return mStorage.at(pos);
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::const_reference FixedVector<T, N, Storage>::at(
+ size_type pos) const
+{
+ ASSERT(pos < N);
+ return mStorage.at(pos);
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::reference FixedVector<T, N, Storage>::operator[](size_type pos)
+{
+ ASSERT(pos < N);
+ return mStorage[pos];
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::const_reference FixedVector<T, N, Storage>::operator[](
+ size_type pos) const
+{
+ ASSERT(pos < N);
+ return mStorage[pos];
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::const_pointer angle::FixedVector<T, N, Storage>::data() const
+{
+ return mStorage.data();
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::pointer angle::FixedVector<T, N, Storage>::data()
+{
+ return mStorage.data();
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::iterator FixedVector<T, N, Storage>::begin()
+{
+ return mStorage.begin();
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::const_iterator FixedVector<T, N, Storage>::begin() const
+{
+ return mStorage.begin();
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::iterator FixedVector<T, N, Storage>::end()
+{
+ return mStorage.begin() + mSize;
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::const_iterator FixedVector<T, N, Storage>::end() const
+{
+ return mStorage.begin() + mSize;
+}
+
+template <class T, size_t N, class Storage>
+bool FixedVector<T, N, Storage>::empty() const
+{
+ return mSize == 0;
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::size_type FixedVector<T, N, Storage>::size() const
+{
+ return mSize;
+}
+
+template <class T, size_t N, class Storage>
+constexpr typename FixedVector<T, N, Storage>::size_type FixedVector<T, N, Storage>::max_size()
+{
+ return N;
+}
+
+template <class T, size_t N, class Storage>
+void FixedVector<T, N, Storage>::clear()
+{
+ resize(0);
+}
+
+template <class T, size_t N, class Storage>
+void FixedVector<T, N, Storage>::push_back(const value_type &value)
+{
+ ASSERT(mSize < N);
+ mStorage[mSize] = value;
+ mSize++;
+}
+
+template <class T, size_t N, class Storage>
+void FixedVector<T, N, Storage>::push_back(value_type &&value)
+{
+ ASSERT(mSize < N);
+ mStorage[mSize] = std::move(value);
+ mSize++;
+}
+
+template <class T, size_t N, class Storage>
+template <class... Args>
+void FixedVector<T, N, Storage>::emplace_back(Args &&... args)
+{
+ ASSERT(mSize < N);
+ new (&mStorage[mSize]) T{std::forward<Args>(args)...};
+ mSize++;
+}
+
+template <class T, size_t N, class Storage>
+void FixedVector<T, N, Storage>::pop_back()
+{
+ ASSERT(mSize > 0);
+ mSize--;
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::reference FixedVector<T, N, Storage>::back()
+{
+ ASSERT(mSize > 0);
+ return mStorage[mSize - 1];
+}
+
+template <class T, size_t N, class Storage>
+typename FixedVector<T, N, Storage>::const_reference FixedVector<T, N, Storage>::back() const
+{
+ ASSERT(mSize > 0);
+ return mStorage[mSize - 1];
+}
+
+template <class T, size_t N, class Storage>
+void FixedVector<T, N, Storage>::swap(FixedVector<T, N, Storage> &other)
+{
+ std::swap(mSize, other.mSize);
+ std::swap(mStorage, other.mStorage);
+}
+
+template <class T, size_t N, class Storage>
+void FixedVector<T, N, Storage>::resize(size_type count)
+{
+ ASSERT(count <= N);
+ while (mSize > count)
+ {
+ mSize--;
+ mStorage[mSize] = value_type();
+ }
+ while (mSize < count)
+ {
+ mStorage[mSize] = value_type();
+ mSize++;
+ }
+}
+
+template <class T, size_t N, class Storage>
+void FixedVector<T, N, Storage>::resize(size_type count, const value_type &value)
+{
+ ASSERT(count <= N);
+ while (mSize > count)
+ {
+ mSize--;
+ mStorage[mSize] = value_type();
+ }
+ while (mSize < count)
+ {
+ mStorage[mSize] = value;
+ mSize++;
+ }
+}
+
+template <class T, size_t N, class Storage>
+void FixedVector<T, N, Storage>::assign_from_initializer_list(
+ std::initializer_list<value_type> init)
+{
+ for (auto element : init)
+ {
+ mStorage[mSize] = std::move(element);
+ mSize++;
+ }
+}
+
+template <class T, size_t N, class Storage>
+bool FixedVector<T, N, Storage>::full() const
+{
+ return (mSize == N);
+}
+} // namespace angle
+
+#endif // COMMON_FIXEDVECTOR_H_
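
Unlike FastVector, FixedVector above never allocates: its capacity is the compile-time N and exceeding it trips an ASSERT, so callers size N for the worst case. A small sketch (not part of this patch; the function name and sizes are illustrative):

    #include "common/FixedVector.h"

    void fixedVectorSketch()
    {
        // Capacity is fixed at 8; the initializer list must not exceed it.
        angle::FixedVector<float, 8> weights = {0.25f, 0.5f, 0.25f};

        weights.push_back(1.0f);  // size() is now 4, max_size() is 8

        if (!weights.full())
        {
            weights.push_back(0.0f);
        }

        // resize() never reallocates; shrinking resets the dropped slots to a
        // default-constructed value_type.
        weights.resize(2);
    }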
diff --git a/gfx/angle/checkout/src/common/Float16ToFloat32.cpp b/gfx/angle/checkout/src/common/Float16ToFloat32.cpp
new file mode 100644
index 0000000000..f9dfe23307
--- /dev/null
+++ b/gfx/angle/checkout/src/common/Float16ToFloat32.cpp
@@ -0,0 +1,300 @@
+//
+// Copyright 2012 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// This file is automatically generated.
+
+#include "common/mathutil.h"
+
+namespace gl
+{
+
+const static unsigned g_mantissa[2048] = {
+ 0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34a00000, 0x34c00000, 0x34e00000,
+ 0x35000000, 0x35100000, 0x35200000, 0x35300000, 0x35400000, 0x35500000, 0x35600000, 0x35700000,
+ 0x35800000, 0x35880000, 0x35900000, 0x35980000, 0x35a00000, 0x35a80000, 0x35b00000, 0x35b80000,
+ 0x35c00000, 0x35c80000, 0x35d00000, 0x35d80000, 0x35e00000, 0x35e80000, 0x35f00000, 0x35f80000,
+ 0x36000000, 0x36040000, 0x36080000, 0x360c0000, 0x36100000, 0x36140000, 0x36180000, 0x361c0000,
+ 0x36200000, 0x36240000, 0x36280000, 0x362c0000, 0x36300000, 0x36340000, 0x36380000, 0x363c0000,
+ 0x36400000, 0x36440000, 0x36480000, 0x364c0000, 0x36500000, 0x36540000, 0x36580000, 0x365c0000,
+ 0x36600000, 0x36640000, 0x36680000, 0x366c0000, 0x36700000, 0x36740000, 0x36780000, 0x367c0000,
+ 0x36800000, 0x36820000, 0x36840000, 0x36860000, 0x36880000, 0x368a0000, 0x368c0000, 0x368e0000,
+ 0x36900000, 0x36920000, 0x36940000, 0x36960000, 0x36980000, 0x369a0000, 0x369c0000, 0x369e0000,
+ 0x36a00000, 0x36a20000, 0x36a40000, 0x36a60000, 0x36a80000, 0x36aa0000, 0x36ac0000, 0x36ae0000,
+ 0x36b00000, 0x36b20000, 0x36b40000, 0x36b60000, 0x36b80000, 0x36ba0000, 0x36bc0000, 0x36be0000,
+ 0x36c00000, 0x36c20000, 0x36c40000, 0x36c60000, 0x36c80000, 0x36ca0000, 0x36cc0000, 0x36ce0000,
+ 0x36d00000, 0x36d20000, 0x36d40000, 0x36d60000, 0x36d80000, 0x36da0000, 0x36dc0000, 0x36de0000,
+ 0x36e00000, 0x36e20000, 0x36e40000, 0x36e60000, 0x36e80000, 0x36ea0000, 0x36ec0000, 0x36ee0000,
+ 0x36f00000, 0x36f20000, 0x36f40000, 0x36f60000, 0x36f80000, 0x36fa0000, 0x36fc0000, 0x36fe0000,
+ 0x37000000, 0x37010000, 0x37020000, 0x37030000, 0x37040000, 0x37050000, 0x37060000, 0x37070000,
+ 0x37080000, 0x37090000, 0x370a0000, 0x370b0000, 0x370c0000, 0x370d0000, 0x370e0000, 0x370f0000,
+ 0x37100000, 0x37110000, 0x37120000, 0x37130000, 0x37140000, 0x37150000, 0x37160000, 0x37170000,
+ 0x37180000, 0x37190000, 0x371a0000, 0x371b0000, 0x371c0000, 0x371d0000, 0x371e0000, 0x371f0000,
+ 0x37200000, 0x37210000, 0x37220000, 0x37230000, 0x37240000, 0x37250000, 0x37260000, 0x37270000,
+ 0x37280000, 0x37290000, 0x372a0000, 0x372b0000, 0x372c0000, 0x372d0000, 0x372e0000, 0x372f0000,
+ 0x37300000, 0x37310000, 0x37320000, 0x37330000, 0x37340000, 0x37350000, 0x37360000, 0x37370000,
+ 0x37380000, 0x37390000, 0x373a0000, 0x373b0000, 0x373c0000, 0x373d0000, 0x373e0000, 0x373f0000,
+ 0x37400000, 0x37410000, 0x37420000, 0x37430000, 0x37440000, 0x37450000, 0x37460000, 0x37470000,
+ 0x37480000, 0x37490000, 0x374a0000, 0x374b0000, 0x374c0000, 0x374d0000, 0x374e0000, 0x374f0000,
+ 0x37500000, 0x37510000, 0x37520000, 0x37530000, 0x37540000, 0x37550000, 0x37560000, 0x37570000,
+ 0x37580000, 0x37590000, 0x375a0000, 0x375b0000, 0x375c0000, 0x375d0000, 0x375e0000, 0x375f0000,
+ 0x37600000, 0x37610000, 0x37620000, 0x37630000, 0x37640000, 0x37650000, 0x37660000, 0x37670000,
+ 0x37680000, 0x37690000, 0x376a0000, 0x376b0000, 0x376c0000, 0x376d0000, 0x376e0000, 0x376f0000,
+ 0x37700000, 0x37710000, 0x37720000, 0x37730000, 0x37740000, 0x37750000, 0x37760000, 0x37770000,
+ 0x37780000, 0x37790000, 0x377a0000, 0x377b0000, 0x377c0000, 0x377d0000, 0x377e0000, 0x377f0000,
+ 0x37800000, 0x37808000, 0x37810000, 0x37818000, 0x37820000, 0x37828000, 0x37830000, 0x37838000,
+ 0x37840000, 0x37848000, 0x37850000, 0x37858000, 0x37860000, 0x37868000, 0x37870000, 0x37878000,
+ 0x37880000, 0x37888000, 0x37890000, 0x37898000, 0x378a0000, 0x378a8000, 0x378b0000, 0x378b8000,
+ 0x378c0000, 0x378c8000, 0x378d0000, 0x378d8000, 0x378e0000, 0x378e8000, 0x378f0000, 0x378f8000,
+ 0x37900000, 0x37908000, 0x37910000, 0x37918000, 0x37920000, 0x37928000, 0x37930000, 0x37938000,
+ 0x37940000, 0x37948000, 0x37950000, 0x37958000, 0x37960000, 0x37968000, 0x37970000, 0x37978000,
+ 0x37980000, 0x37988000, 0x37990000, 0x37998000, 0x379a0000, 0x379a8000, 0x379b0000, 0x379b8000,
+ 0x379c0000, 0x379c8000, 0x379d0000, 0x379d8000, 0x379e0000, 0x379e8000, 0x379f0000, 0x379f8000,
+ 0x37a00000, 0x37a08000, 0x37a10000, 0x37a18000, 0x37a20000, 0x37a28000, 0x37a30000, 0x37a38000,
+ 0x37a40000, 0x37a48000, 0x37a50000, 0x37a58000, 0x37a60000, 0x37a68000, 0x37a70000, 0x37a78000,
+ 0x37a80000, 0x37a88000, 0x37a90000, 0x37a98000, 0x37aa0000, 0x37aa8000, 0x37ab0000, 0x37ab8000,
+ 0x37ac0000, 0x37ac8000, 0x37ad0000, 0x37ad8000, 0x37ae0000, 0x37ae8000, 0x37af0000, 0x37af8000,
+ 0x37b00000, 0x37b08000, 0x37b10000, 0x37b18000, 0x37b20000, 0x37b28000, 0x37b30000, 0x37b38000,
+ 0x37b40000, 0x37b48000, 0x37b50000, 0x37b58000, 0x37b60000, 0x37b68000, 0x37b70000, 0x37b78000,
+ 0x37b80000, 0x37b88000, 0x37b90000, 0x37b98000, 0x37ba0000, 0x37ba8000, 0x37bb0000, 0x37bb8000,
+ 0x37bc0000, 0x37bc8000, 0x37bd0000, 0x37bd8000, 0x37be0000, 0x37be8000, 0x37bf0000, 0x37bf8000,
+ 0x37c00000, 0x37c08000, 0x37c10000, 0x37c18000, 0x37c20000, 0x37c28000, 0x37c30000, 0x37c38000,
+ 0x37c40000, 0x37c48000, 0x37c50000, 0x37c58000, 0x37c60000, 0x37c68000, 0x37c70000, 0x37c78000,
+ 0x37c80000, 0x37c88000, 0x37c90000, 0x37c98000, 0x37ca0000, 0x37ca8000, 0x37cb0000, 0x37cb8000,
+ 0x37cc0000, 0x37cc8000, 0x37cd0000, 0x37cd8000, 0x37ce0000, 0x37ce8000, 0x37cf0000, 0x37cf8000,
+ 0x37d00000, 0x37d08000, 0x37d10000, 0x37d18000, 0x37d20000, 0x37d28000, 0x37d30000, 0x37d38000,
+ 0x37d40000, 0x37d48000, 0x37d50000, 0x37d58000, 0x37d60000, 0x37d68000, 0x37d70000, 0x37d78000,
+ 0x37d80000, 0x37d88000, 0x37d90000, 0x37d98000, 0x37da0000, 0x37da8000, 0x37db0000, 0x37db8000,
+ 0x37dc0000, 0x37dc8000, 0x37dd0000, 0x37dd8000, 0x37de0000, 0x37de8000, 0x37df0000, 0x37df8000,
+ 0x37e00000, 0x37e08000, 0x37e10000, 0x37e18000, 0x37e20000, 0x37e28000, 0x37e30000, 0x37e38000,
+ 0x37e40000, 0x37e48000, 0x37e50000, 0x37e58000, 0x37e60000, 0x37e68000, 0x37e70000, 0x37e78000,
+ 0x37e80000, 0x37e88000, 0x37e90000, 0x37e98000, 0x37ea0000, 0x37ea8000, 0x37eb0000, 0x37eb8000,
+ 0x37ec0000, 0x37ec8000, 0x37ed0000, 0x37ed8000, 0x37ee0000, 0x37ee8000, 0x37ef0000, 0x37ef8000,
+ 0x37f00000, 0x37f08000, 0x37f10000, 0x37f18000, 0x37f20000, 0x37f28000, 0x37f30000, 0x37f38000,
+ 0x37f40000, 0x37f48000, 0x37f50000, 0x37f58000, 0x37f60000, 0x37f68000, 0x37f70000, 0x37f78000,
+ 0x37f80000, 0x37f88000, 0x37f90000, 0x37f98000, 0x37fa0000, 0x37fa8000, 0x37fb0000, 0x37fb8000,
+ 0x37fc0000, 0x37fc8000, 0x37fd0000, 0x37fd8000, 0x37fe0000, 0x37fe8000, 0x37ff0000, 0x37ff8000,
+ 0x38000000, 0x38004000, 0x38008000, 0x3800c000, 0x38010000, 0x38014000, 0x38018000, 0x3801c000,
+ 0x38020000, 0x38024000, 0x38028000, 0x3802c000, 0x38030000, 0x38034000, 0x38038000, 0x3803c000,
+ 0x38040000, 0x38044000, 0x38048000, 0x3804c000, 0x38050000, 0x38054000, 0x38058000, 0x3805c000,
+ 0x38060000, 0x38064000, 0x38068000, 0x3806c000, 0x38070000, 0x38074000, 0x38078000, 0x3807c000,
+ 0x38080000, 0x38084000, 0x38088000, 0x3808c000, 0x38090000, 0x38094000, 0x38098000, 0x3809c000,
+ 0x380a0000, 0x380a4000, 0x380a8000, 0x380ac000, 0x380b0000, 0x380b4000, 0x380b8000, 0x380bc000,
+ 0x380c0000, 0x380c4000, 0x380c8000, 0x380cc000, 0x380d0000, 0x380d4000, 0x380d8000, 0x380dc000,
+ 0x380e0000, 0x380e4000, 0x380e8000, 0x380ec000, 0x380f0000, 0x380f4000, 0x380f8000, 0x380fc000,
+ 0x38100000, 0x38104000, 0x38108000, 0x3810c000, 0x38110000, 0x38114000, 0x38118000, 0x3811c000,
+ 0x38120000, 0x38124000, 0x38128000, 0x3812c000, 0x38130000, 0x38134000, 0x38138000, 0x3813c000,
+ 0x38140000, 0x38144000, 0x38148000, 0x3814c000, 0x38150000, 0x38154000, 0x38158000, 0x3815c000,
+ 0x38160000, 0x38164000, 0x38168000, 0x3816c000, 0x38170000, 0x38174000, 0x38178000, 0x3817c000,
+ 0x38180000, 0x38184000, 0x38188000, 0x3818c000, 0x38190000, 0x38194000, 0x38198000, 0x3819c000,
+ 0x381a0000, 0x381a4000, 0x381a8000, 0x381ac000, 0x381b0000, 0x381b4000, 0x381b8000, 0x381bc000,
+ 0x381c0000, 0x381c4000, 0x381c8000, 0x381cc000, 0x381d0000, 0x381d4000, 0x381d8000, 0x381dc000,
+ 0x381e0000, 0x381e4000, 0x381e8000, 0x381ec000, 0x381f0000, 0x381f4000, 0x381f8000, 0x381fc000,
+ 0x38200000, 0x38204000, 0x38208000, 0x3820c000, 0x38210000, 0x38214000, 0x38218000, 0x3821c000,
+ 0x38220000, 0x38224000, 0x38228000, 0x3822c000, 0x38230000, 0x38234000, 0x38238000, 0x3823c000,
+ 0x38240000, 0x38244000, 0x38248000, 0x3824c000, 0x38250000, 0x38254000, 0x38258000, 0x3825c000,
+ 0x38260000, 0x38264000, 0x38268000, 0x3826c000, 0x38270000, 0x38274000, 0x38278000, 0x3827c000,
+ 0x38280000, 0x38284000, 0x38288000, 0x3828c000, 0x38290000, 0x38294000, 0x38298000, 0x3829c000,
+ 0x382a0000, 0x382a4000, 0x382a8000, 0x382ac000, 0x382b0000, 0x382b4000, 0x382b8000, 0x382bc000,
+ 0x382c0000, 0x382c4000, 0x382c8000, 0x382cc000, 0x382d0000, 0x382d4000, 0x382d8000, 0x382dc000,
+ 0x382e0000, 0x382e4000, 0x382e8000, 0x382ec000, 0x382f0000, 0x382f4000, 0x382f8000, 0x382fc000,
+ 0x38300000, 0x38304000, 0x38308000, 0x3830c000, 0x38310000, 0x38314000, 0x38318000, 0x3831c000,
+ 0x38320000, 0x38324000, 0x38328000, 0x3832c000, 0x38330000, 0x38334000, 0x38338000, 0x3833c000,
+ 0x38340000, 0x38344000, 0x38348000, 0x3834c000, 0x38350000, 0x38354000, 0x38358000, 0x3835c000,
+ 0x38360000, 0x38364000, 0x38368000, 0x3836c000, 0x38370000, 0x38374000, 0x38378000, 0x3837c000,
+ 0x38380000, 0x38384000, 0x38388000, 0x3838c000, 0x38390000, 0x38394000, 0x38398000, 0x3839c000,
+ 0x383a0000, 0x383a4000, 0x383a8000, 0x383ac000, 0x383b0000, 0x383b4000, 0x383b8000, 0x383bc000,
+ 0x383c0000, 0x383c4000, 0x383c8000, 0x383cc000, 0x383d0000, 0x383d4000, 0x383d8000, 0x383dc000,
+ 0x383e0000, 0x383e4000, 0x383e8000, 0x383ec000, 0x383f0000, 0x383f4000, 0x383f8000, 0x383fc000,
+ 0x38400000, 0x38404000, 0x38408000, 0x3840c000, 0x38410000, 0x38414000, 0x38418000, 0x3841c000,
+ 0x38420000, 0x38424000, 0x38428000, 0x3842c000, 0x38430000, 0x38434000, 0x38438000, 0x3843c000,
+ 0x38440000, 0x38444000, 0x38448000, 0x3844c000, 0x38450000, 0x38454000, 0x38458000, 0x3845c000,
+ 0x38460000, 0x38464000, 0x38468000, 0x3846c000, 0x38470000, 0x38474000, 0x38478000, 0x3847c000,
+ 0x38480000, 0x38484000, 0x38488000, 0x3848c000, 0x38490000, 0x38494000, 0x38498000, 0x3849c000,
+ 0x384a0000, 0x384a4000, 0x384a8000, 0x384ac000, 0x384b0000, 0x384b4000, 0x384b8000, 0x384bc000,
+ 0x384c0000, 0x384c4000, 0x384c8000, 0x384cc000, 0x384d0000, 0x384d4000, 0x384d8000, 0x384dc000,
+ 0x384e0000, 0x384e4000, 0x384e8000, 0x384ec000, 0x384f0000, 0x384f4000, 0x384f8000, 0x384fc000,
+ 0x38500000, 0x38504000, 0x38508000, 0x3850c000, 0x38510000, 0x38514000, 0x38518000, 0x3851c000,
+ 0x38520000, 0x38524000, 0x38528000, 0x3852c000, 0x38530000, 0x38534000, 0x38538000, 0x3853c000,
+ 0x38540000, 0x38544000, 0x38548000, 0x3854c000, 0x38550000, 0x38554000, 0x38558000, 0x3855c000,
+ 0x38560000, 0x38564000, 0x38568000, 0x3856c000, 0x38570000, 0x38574000, 0x38578000, 0x3857c000,
+ 0x38580000, 0x38584000, 0x38588000, 0x3858c000, 0x38590000, 0x38594000, 0x38598000, 0x3859c000,
+ 0x385a0000, 0x385a4000, 0x385a8000, 0x385ac000, 0x385b0000, 0x385b4000, 0x385b8000, 0x385bc000,
+ 0x385c0000, 0x385c4000, 0x385c8000, 0x385cc000, 0x385d0000, 0x385d4000, 0x385d8000, 0x385dc000,
+ 0x385e0000, 0x385e4000, 0x385e8000, 0x385ec000, 0x385f0000, 0x385f4000, 0x385f8000, 0x385fc000,
+ 0x38600000, 0x38604000, 0x38608000, 0x3860c000, 0x38610000, 0x38614000, 0x38618000, 0x3861c000,
+ 0x38620000, 0x38624000, 0x38628000, 0x3862c000, 0x38630000, 0x38634000, 0x38638000, 0x3863c000,
+ 0x38640000, 0x38644000, 0x38648000, 0x3864c000, 0x38650000, 0x38654000, 0x38658000, 0x3865c000,
+ 0x38660000, 0x38664000, 0x38668000, 0x3866c000, 0x38670000, 0x38674000, 0x38678000, 0x3867c000,
+ 0x38680000, 0x38684000, 0x38688000, 0x3868c000, 0x38690000, 0x38694000, 0x38698000, 0x3869c000,
+ 0x386a0000, 0x386a4000, 0x386a8000, 0x386ac000, 0x386b0000, 0x386b4000, 0x386b8000, 0x386bc000,
+ 0x386c0000, 0x386c4000, 0x386c8000, 0x386cc000, 0x386d0000, 0x386d4000, 0x386d8000, 0x386dc000,
+ 0x386e0000, 0x386e4000, 0x386e8000, 0x386ec000, 0x386f0000, 0x386f4000, 0x386f8000, 0x386fc000,
+ 0x38700000, 0x38704000, 0x38708000, 0x3870c000, 0x38710000, 0x38714000, 0x38718000, 0x3871c000,
+ 0x38720000, 0x38724000, 0x38728000, 0x3872c000, 0x38730000, 0x38734000, 0x38738000, 0x3873c000,
+ 0x38740000, 0x38744000, 0x38748000, 0x3874c000, 0x38750000, 0x38754000, 0x38758000, 0x3875c000,
+ 0x38760000, 0x38764000, 0x38768000, 0x3876c000, 0x38770000, 0x38774000, 0x38778000, 0x3877c000,
+ 0x38780000, 0x38784000, 0x38788000, 0x3878c000, 0x38790000, 0x38794000, 0x38798000, 0x3879c000,
+ 0x387a0000, 0x387a4000, 0x387a8000, 0x387ac000, 0x387b0000, 0x387b4000, 0x387b8000, 0x387bc000,
+ 0x387c0000, 0x387c4000, 0x387c8000, 0x387cc000, 0x387d0000, 0x387d4000, 0x387d8000, 0x387dc000,
+ 0x387e0000, 0x387e4000, 0x387e8000, 0x387ec000, 0x387f0000, 0x387f4000, 0x387f8000, 0x387fc000,
+ 0x38000000, 0x38002000, 0x38004000, 0x38006000, 0x38008000, 0x3800a000, 0x3800c000, 0x3800e000,
+ 0x38010000, 0x38012000, 0x38014000, 0x38016000, 0x38018000, 0x3801a000, 0x3801c000, 0x3801e000,
+ 0x38020000, 0x38022000, 0x38024000, 0x38026000, 0x38028000, 0x3802a000, 0x3802c000, 0x3802e000,
+ 0x38030000, 0x38032000, 0x38034000, 0x38036000, 0x38038000, 0x3803a000, 0x3803c000, 0x3803e000,
+ 0x38040000, 0x38042000, 0x38044000, 0x38046000, 0x38048000, 0x3804a000, 0x3804c000, 0x3804e000,
+ 0x38050000, 0x38052000, 0x38054000, 0x38056000, 0x38058000, 0x3805a000, 0x3805c000, 0x3805e000,
+ 0x38060000, 0x38062000, 0x38064000, 0x38066000, 0x38068000, 0x3806a000, 0x3806c000, 0x3806e000,
+ 0x38070000, 0x38072000, 0x38074000, 0x38076000, 0x38078000, 0x3807a000, 0x3807c000, 0x3807e000,
+ 0x38080000, 0x38082000, 0x38084000, 0x38086000, 0x38088000, 0x3808a000, 0x3808c000, 0x3808e000,
+ 0x38090000, 0x38092000, 0x38094000, 0x38096000, 0x38098000, 0x3809a000, 0x3809c000, 0x3809e000,
+ 0x380a0000, 0x380a2000, 0x380a4000, 0x380a6000, 0x380a8000, 0x380aa000, 0x380ac000, 0x380ae000,
+ 0x380b0000, 0x380b2000, 0x380b4000, 0x380b6000, 0x380b8000, 0x380ba000, 0x380bc000, 0x380be000,
+ 0x380c0000, 0x380c2000, 0x380c4000, 0x380c6000, 0x380c8000, 0x380ca000, 0x380cc000, 0x380ce000,
+ 0x380d0000, 0x380d2000, 0x380d4000, 0x380d6000, 0x380d8000, 0x380da000, 0x380dc000, 0x380de000,
+ 0x380e0000, 0x380e2000, 0x380e4000, 0x380e6000, 0x380e8000, 0x380ea000, 0x380ec000, 0x380ee000,
+ 0x380f0000, 0x380f2000, 0x380f4000, 0x380f6000, 0x380f8000, 0x380fa000, 0x380fc000, 0x380fe000,
+ 0x38100000, 0x38102000, 0x38104000, 0x38106000, 0x38108000, 0x3810a000, 0x3810c000, 0x3810e000,
+ 0x38110000, 0x38112000, 0x38114000, 0x38116000, 0x38118000, 0x3811a000, 0x3811c000, 0x3811e000,
+ 0x38120000, 0x38122000, 0x38124000, 0x38126000, 0x38128000, 0x3812a000, 0x3812c000, 0x3812e000,
+ 0x38130000, 0x38132000, 0x38134000, 0x38136000, 0x38138000, 0x3813a000, 0x3813c000, 0x3813e000,
+ 0x38140000, 0x38142000, 0x38144000, 0x38146000, 0x38148000, 0x3814a000, 0x3814c000, 0x3814e000,
+ 0x38150000, 0x38152000, 0x38154000, 0x38156000, 0x38158000, 0x3815a000, 0x3815c000, 0x3815e000,
+ 0x38160000, 0x38162000, 0x38164000, 0x38166000, 0x38168000, 0x3816a000, 0x3816c000, 0x3816e000,
+ 0x38170000, 0x38172000, 0x38174000, 0x38176000, 0x38178000, 0x3817a000, 0x3817c000, 0x3817e000,
+ 0x38180000, 0x38182000, 0x38184000, 0x38186000, 0x38188000, 0x3818a000, 0x3818c000, 0x3818e000,
+ 0x38190000, 0x38192000, 0x38194000, 0x38196000, 0x38198000, 0x3819a000, 0x3819c000, 0x3819e000,
+ 0x381a0000, 0x381a2000, 0x381a4000, 0x381a6000, 0x381a8000, 0x381aa000, 0x381ac000, 0x381ae000,
+ 0x381b0000, 0x381b2000, 0x381b4000, 0x381b6000, 0x381b8000, 0x381ba000, 0x381bc000, 0x381be000,
+ 0x381c0000, 0x381c2000, 0x381c4000, 0x381c6000, 0x381c8000, 0x381ca000, 0x381cc000, 0x381ce000,
+ 0x381d0000, 0x381d2000, 0x381d4000, 0x381d6000, 0x381d8000, 0x381da000, 0x381dc000, 0x381de000,
+ 0x381e0000, 0x381e2000, 0x381e4000, 0x381e6000, 0x381e8000, 0x381ea000, 0x381ec000, 0x381ee000,
+ 0x381f0000, 0x381f2000, 0x381f4000, 0x381f6000, 0x381f8000, 0x381fa000, 0x381fc000, 0x381fe000,
+ 0x38200000, 0x38202000, 0x38204000, 0x38206000, 0x38208000, 0x3820a000, 0x3820c000, 0x3820e000,
+ 0x38210000, 0x38212000, 0x38214000, 0x38216000, 0x38218000, 0x3821a000, 0x3821c000, 0x3821e000,
+ 0x38220000, 0x38222000, 0x38224000, 0x38226000, 0x38228000, 0x3822a000, 0x3822c000, 0x3822e000,
+ 0x38230000, 0x38232000, 0x38234000, 0x38236000, 0x38238000, 0x3823a000, 0x3823c000, 0x3823e000,
+ 0x38240000, 0x38242000, 0x38244000, 0x38246000, 0x38248000, 0x3824a000, 0x3824c000, 0x3824e000,
+ 0x38250000, 0x38252000, 0x38254000, 0x38256000, 0x38258000, 0x3825a000, 0x3825c000, 0x3825e000,
+ 0x38260000, 0x38262000, 0x38264000, 0x38266000, 0x38268000, 0x3826a000, 0x3826c000, 0x3826e000,
+ 0x38270000, 0x38272000, 0x38274000, 0x38276000, 0x38278000, 0x3827a000, 0x3827c000, 0x3827e000,
+ 0x38280000, 0x38282000, 0x38284000, 0x38286000, 0x38288000, 0x3828a000, 0x3828c000, 0x3828e000,
+ 0x38290000, 0x38292000, 0x38294000, 0x38296000, 0x38298000, 0x3829a000, 0x3829c000, 0x3829e000,
+ 0x382a0000, 0x382a2000, 0x382a4000, 0x382a6000, 0x382a8000, 0x382aa000, 0x382ac000, 0x382ae000,
+ 0x382b0000, 0x382b2000, 0x382b4000, 0x382b6000, 0x382b8000, 0x382ba000, 0x382bc000, 0x382be000,
+ 0x382c0000, 0x382c2000, 0x382c4000, 0x382c6000, 0x382c8000, 0x382ca000, 0x382cc000, 0x382ce000,
+ 0x382d0000, 0x382d2000, 0x382d4000, 0x382d6000, 0x382d8000, 0x382da000, 0x382dc000, 0x382de000,
+ 0x382e0000, 0x382e2000, 0x382e4000, 0x382e6000, 0x382e8000, 0x382ea000, 0x382ec000, 0x382ee000,
+ 0x382f0000, 0x382f2000, 0x382f4000, 0x382f6000, 0x382f8000, 0x382fa000, 0x382fc000, 0x382fe000,
+ 0x38300000, 0x38302000, 0x38304000, 0x38306000, 0x38308000, 0x3830a000, 0x3830c000, 0x3830e000,
+ 0x38310000, 0x38312000, 0x38314000, 0x38316000, 0x38318000, 0x3831a000, 0x3831c000, 0x3831e000,
+ 0x38320000, 0x38322000, 0x38324000, 0x38326000, 0x38328000, 0x3832a000, 0x3832c000, 0x3832e000,
+ 0x38330000, 0x38332000, 0x38334000, 0x38336000, 0x38338000, 0x3833a000, 0x3833c000, 0x3833e000,
+ 0x38340000, 0x38342000, 0x38344000, 0x38346000, 0x38348000, 0x3834a000, 0x3834c000, 0x3834e000,
+ 0x38350000, 0x38352000, 0x38354000, 0x38356000, 0x38358000, 0x3835a000, 0x3835c000, 0x3835e000,
+ 0x38360000, 0x38362000, 0x38364000, 0x38366000, 0x38368000, 0x3836a000, 0x3836c000, 0x3836e000,
+ 0x38370000, 0x38372000, 0x38374000, 0x38376000, 0x38378000, 0x3837a000, 0x3837c000, 0x3837e000,
+ 0x38380000, 0x38382000, 0x38384000, 0x38386000, 0x38388000, 0x3838a000, 0x3838c000, 0x3838e000,
+ 0x38390000, 0x38392000, 0x38394000, 0x38396000, 0x38398000, 0x3839a000, 0x3839c000, 0x3839e000,
+ 0x383a0000, 0x383a2000, 0x383a4000, 0x383a6000, 0x383a8000, 0x383aa000, 0x383ac000, 0x383ae000,
+ 0x383b0000, 0x383b2000, 0x383b4000, 0x383b6000, 0x383b8000, 0x383ba000, 0x383bc000, 0x383be000,
+ 0x383c0000, 0x383c2000, 0x383c4000, 0x383c6000, 0x383c8000, 0x383ca000, 0x383cc000, 0x383ce000,
+ 0x383d0000, 0x383d2000, 0x383d4000, 0x383d6000, 0x383d8000, 0x383da000, 0x383dc000, 0x383de000,
+ 0x383e0000, 0x383e2000, 0x383e4000, 0x383e6000, 0x383e8000, 0x383ea000, 0x383ec000, 0x383ee000,
+ 0x383f0000, 0x383f2000, 0x383f4000, 0x383f6000, 0x383f8000, 0x383fa000, 0x383fc000, 0x383fe000,
+ 0x38400000, 0x38402000, 0x38404000, 0x38406000, 0x38408000, 0x3840a000, 0x3840c000, 0x3840e000,
+ 0x38410000, 0x38412000, 0x38414000, 0x38416000, 0x38418000, 0x3841a000, 0x3841c000, 0x3841e000,
+ 0x38420000, 0x38422000, 0x38424000, 0x38426000, 0x38428000, 0x3842a000, 0x3842c000, 0x3842e000,
+ 0x38430000, 0x38432000, 0x38434000, 0x38436000, 0x38438000, 0x3843a000, 0x3843c000, 0x3843e000,
+ 0x38440000, 0x38442000, 0x38444000, 0x38446000, 0x38448000, 0x3844a000, 0x3844c000, 0x3844e000,
+ 0x38450000, 0x38452000, 0x38454000, 0x38456000, 0x38458000, 0x3845a000, 0x3845c000, 0x3845e000,
+ 0x38460000, 0x38462000, 0x38464000, 0x38466000, 0x38468000, 0x3846a000, 0x3846c000, 0x3846e000,
+ 0x38470000, 0x38472000, 0x38474000, 0x38476000, 0x38478000, 0x3847a000, 0x3847c000, 0x3847e000,
+ 0x38480000, 0x38482000, 0x38484000, 0x38486000, 0x38488000, 0x3848a000, 0x3848c000, 0x3848e000,
+ 0x38490000, 0x38492000, 0x38494000, 0x38496000, 0x38498000, 0x3849a000, 0x3849c000, 0x3849e000,
+ 0x384a0000, 0x384a2000, 0x384a4000, 0x384a6000, 0x384a8000, 0x384aa000, 0x384ac000, 0x384ae000,
+ 0x384b0000, 0x384b2000, 0x384b4000, 0x384b6000, 0x384b8000, 0x384ba000, 0x384bc000, 0x384be000,
+ 0x384c0000, 0x384c2000, 0x384c4000, 0x384c6000, 0x384c8000, 0x384ca000, 0x384cc000, 0x384ce000,
+ 0x384d0000, 0x384d2000, 0x384d4000, 0x384d6000, 0x384d8000, 0x384da000, 0x384dc000, 0x384de000,
+ 0x384e0000, 0x384e2000, 0x384e4000, 0x384e6000, 0x384e8000, 0x384ea000, 0x384ec000, 0x384ee000,
+ 0x384f0000, 0x384f2000, 0x384f4000, 0x384f6000, 0x384f8000, 0x384fa000, 0x384fc000, 0x384fe000,
+ 0x38500000, 0x38502000, 0x38504000, 0x38506000, 0x38508000, 0x3850a000, 0x3850c000, 0x3850e000,
+ 0x38510000, 0x38512000, 0x38514000, 0x38516000, 0x38518000, 0x3851a000, 0x3851c000, 0x3851e000,
+ 0x38520000, 0x38522000, 0x38524000, 0x38526000, 0x38528000, 0x3852a000, 0x3852c000, 0x3852e000,
+ 0x38530000, 0x38532000, 0x38534000, 0x38536000, 0x38538000, 0x3853a000, 0x3853c000, 0x3853e000,
+ 0x38540000, 0x38542000, 0x38544000, 0x38546000, 0x38548000, 0x3854a000, 0x3854c000, 0x3854e000,
+ 0x38550000, 0x38552000, 0x38554000, 0x38556000, 0x38558000, 0x3855a000, 0x3855c000, 0x3855e000,
+ 0x38560000, 0x38562000, 0x38564000, 0x38566000, 0x38568000, 0x3856a000, 0x3856c000, 0x3856e000,
+ 0x38570000, 0x38572000, 0x38574000, 0x38576000, 0x38578000, 0x3857a000, 0x3857c000, 0x3857e000,
+ 0x38580000, 0x38582000, 0x38584000, 0x38586000, 0x38588000, 0x3858a000, 0x3858c000, 0x3858e000,
+ 0x38590000, 0x38592000, 0x38594000, 0x38596000, 0x38598000, 0x3859a000, 0x3859c000, 0x3859e000,
+ 0x385a0000, 0x385a2000, 0x385a4000, 0x385a6000, 0x385a8000, 0x385aa000, 0x385ac000, 0x385ae000,
+ 0x385b0000, 0x385b2000, 0x385b4000, 0x385b6000, 0x385b8000, 0x385ba000, 0x385bc000, 0x385be000,
+ 0x385c0000, 0x385c2000, 0x385c4000, 0x385c6000, 0x385c8000, 0x385ca000, 0x385cc000, 0x385ce000,
+ 0x385d0000, 0x385d2000, 0x385d4000, 0x385d6000, 0x385d8000, 0x385da000, 0x385dc000, 0x385de000,
+ 0x385e0000, 0x385e2000, 0x385e4000, 0x385e6000, 0x385e8000, 0x385ea000, 0x385ec000, 0x385ee000,
+ 0x385f0000, 0x385f2000, 0x385f4000, 0x385f6000, 0x385f8000, 0x385fa000, 0x385fc000, 0x385fe000,
+ 0x38600000, 0x38602000, 0x38604000, 0x38606000, 0x38608000, 0x3860a000, 0x3860c000, 0x3860e000,
+ 0x38610000, 0x38612000, 0x38614000, 0x38616000, 0x38618000, 0x3861a000, 0x3861c000, 0x3861e000,
+ 0x38620000, 0x38622000, 0x38624000, 0x38626000, 0x38628000, 0x3862a000, 0x3862c000, 0x3862e000,
+ 0x38630000, 0x38632000, 0x38634000, 0x38636000, 0x38638000, 0x3863a000, 0x3863c000, 0x3863e000,
+ 0x38640000, 0x38642000, 0x38644000, 0x38646000, 0x38648000, 0x3864a000, 0x3864c000, 0x3864e000,
+ 0x38650000, 0x38652000, 0x38654000, 0x38656000, 0x38658000, 0x3865a000, 0x3865c000, 0x3865e000,
+ 0x38660000, 0x38662000, 0x38664000, 0x38666000, 0x38668000, 0x3866a000, 0x3866c000, 0x3866e000,
+ 0x38670000, 0x38672000, 0x38674000, 0x38676000, 0x38678000, 0x3867a000, 0x3867c000, 0x3867e000,
+ 0x38680000, 0x38682000, 0x38684000, 0x38686000, 0x38688000, 0x3868a000, 0x3868c000, 0x3868e000,
+ 0x38690000, 0x38692000, 0x38694000, 0x38696000, 0x38698000, 0x3869a000, 0x3869c000, 0x3869e000,
+ 0x386a0000, 0x386a2000, 0x386a4000, 0x386a6000, 0x386a8000, 0x386aa000, 0x386ac000, 0x386ae000,
+ 0x386b0000, 0x386b2000, 0x386b4000, 0x386b6000, 0x386b8000, 0x386ba000, 0x386bc000, 0x386be000,
+ 0x386c0000, 0x386c2000, 0x386c4000, 0x386c6000, 0x386c8000, 0x386ca000, 0x386cc000, 0x386ce000,
+ 0x386d0000, 0x386d2000, 0x386d4000, 0x386d6000, 0x386d8000, 0x386da000, 0x386dc000, 0x386de000,
+ 0x386e0000, 0x386e2000, 0x386e4000, 0x386e6000, 0x386e8000, 0x386ea000, 0x386ec000, 0x386ee000,
+ 0x386f0000, 0x386f2000, 0x386f4000, 0x386f6000, 0x386f8000, 0x386fa000, 0x386fc000, 0x386fe000,
+ 0x38700000, 0x38702000, 0x38704000, 0x38706000, 0x38708000, 0x3870a000, 0x3870c000, 0x3870e000,
+ 0x38710000, 0x38712000, 0x38714000, 0x38716000, 0x38718000, 0x3871a000, 0x3871c000, 0x3871e000,
+ 0x38720000, 0x38722000, 0x38724000, 0x38726000, 0x38728000, 0x3872a000, 0x3872c000, 0x3872e000,
+ 0x38730000, 0x38732000, 0x38734000, 0x38736000, 0x38738000, 0x3873a000, 0x3873c000, 0x3873e000,
+ 0x38740000, 0x38742000, 0x38744000, 0x38746000, 0x38748000, 0x3874a000, 0x3874c000, 0x3874e000,
+ 0x38750000, 0x38752000, 0x38754000, 0x38756000, 0x38758000, 0x3875a000, 0x3875c000, 0x3875e000,
+ 0x38760000, 0x38762000, 0x38764000, 0x38766000, 0x38768000, 0x3876a000, 0x3876c000, 0x3876e000,
+ 0x38770000, 0x38772000, 0x38774000, 0x38776000, 0x38778000, 0x3877a000, 0x3877c000, 0x3877e000,
+ 0x38780000, 0x38782000, 0x38784000, 0x38786000, 0x38788000, 0x3878a000, 0x3878c000, 0x3878e000,
+ 0x38790000, 0x38792000, 0x38794000, 0x38796000, 0x38798000, 0x3879a000, 0x3879c000, 0x3879e000,
+ 0x387a0000, 0x387a2000, 0x387a4000, 0x387a6000, 0x387a8000, 0x387aa000, 0x387ac000, 0x387ae000,
+ 0x387b0000, 0x387b2000, 0x387b4000, 0x387b6000, 0x387b8000, 0x387ba000, 0x387bc000, 0x387be000,
+ 0x387c0000, 0x387c2000, 0x387c4000, 0x387c6000, 0x387c8000, 0x387ca000, 0x387cc000, 0x387ce000,
+ 0x387d0000, 0x387d2000, 0x387d4000, 0x387d6000, 0x387d8000, 0x387da000, 0x387dc000, 0x387de000,
+ 0x387e0000, 0x387e2000, 0x387e4000, 0x387e6000, 0x387e8000, 0x387ea000, 0x387ec000, 0x387ee000,
+ 0x387f0000, 0x387f2000, 0x387f4000, 0x387f6000, 0x387f8000, 0x387fa000, 0x387fc000, 0x387fe000,
+};
+
+const static unsigned g_exponent[64] = {
+ 0x00000000, 0x00800000, 0x01000000, 0x01800000, 0x02000000, 0x02800000, 0x03000000, 0x03800000,
+ 0x04000000, 0x04800000, 0x05000000, 0x05800000, 0x06000000, 0x06800000, 0x07000000, 0x07800000,
+ 0x08000000, 0x08800000, 0x09000000, 0x09800000, 0x0a000000, 0x0a800000, 0x0b000000, 0x0b800000,
+ 0x0c000000, 0x0c800000, 0x0d000000, 0x0d800000, 0x0e000000, 0x0e800000, 0x0f000000, 0x47800000,
+ 0x80000000, 0x80800000, 0x81000000, 0x81800000, 0x82000000, 0x82800000, 0x83000000, 0x83800000,
+ 0x84000000, 0x84800000, 0x85000000, 0x85800000, 0x86000000, 0x86800000, 0x87000000, 0x87800000,
+ 0x88000000, 0x88800000, 0x89000000, 0x89800000, 0x8a000000, 0x8a800000, 0x8b000000, 0x8b800000,
+ 0x8c000000, 0x8c800000, 0x8d000000, 0x8d800000, 0x8e000000, 0x8e800000, 0x8f000000, 0xc7800000,
+};
+
+const static unsigned g_offset[64] = {
+ 0x00000000, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400,
+ 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400,
+ 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400,
+ 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400,
+ 0x00000000, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400,
+ 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400,
+ 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400,
+ 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400,
+};
+
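+// Converts an IEEE 754 half-precision value to single precision using the tables above: the
+// top six bits of h (sign plus exponent) select an exponent adjustment and an offset into the
+// mantissa table, while the low ten mantissa bits complete the mantissa-table index.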
+float float16ToFloat32(unsigned short h)
+{
+ unsigned i32 = g_mantissa[g_offset[h >> 10] + (h & 0x3ff)] + g_exponent[h >> 10];
+ return bitCast<float>(i32);
+}
+} // namespace gl
diff --git a/gfx/angle/checkout/src/common/MemoryBuffer.cpp b/gfx/angle/checkout/src/common/MemoryBuffer.cpp
new file mode 100644
index 0000000000..aadffe8bbe
--- /dev/null
+++ b/gfx/angle/checkout/src/common/MemoryBuffer.cpp
@@ -0,0 +1,179 @@
+//
+// Copyright 2014 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+#include "common/MemoryBuffer.h"
+
+#include <algorithm>
+#include <cstdlib>
+
+#include "common/debug.h"
+
+namespace angle
+{
+
+// MemoryBuffer implementation.
+MemoryBuffer::~MemoryBuffer()
+{
+ if (mData)
+ {
+ free(mData);
+ mData = nullptr;
+ }
+}
+
+bool MemoryBuffer::resize(size_t size)
+{
+ if (size == 0)
+ {
+ if (mData)
+ {
+ free(mData);
+ mData = nullptr;
+ }
+ mSize = 0;
+ return true;
+ }
+
+ if (size == mSize)
+ {
+ return true;
+ }
+
+ // Only reallocate if the size has changed.
+ uint8_t *newMemory = static_cast<uint8_t *>(malloc(sizeof(uint8_t) * size));
+ if (newMemory == nullptr)
+ {
+ return false;
+ }
+
+ if (mData)
+ {
+ // Copy the intersection of the old data and the new data
+ std::copy(mData, mData + std::min(mSize, size), newMemory);
+ free(mData);
+ }
+
+ mData = newMemory;
+ mSize = size;
+
+ return true;
+}
+
+void MemoryBuffer::fill(uint8_t datum)
+{
+ if (!empty())
+ {
+ std::fill(mData, mData + mSize, datum);
+ }
+}
+
+MemoryBuffer::MemoryBuffer(MemoryBuffer &&other) : MemoryBuffer()
+{
+ *this = std::move(other);
+}
+
+MemoryBuffer &MemoryBuffer::operator=(MemoryBuffer &&other)
+{
+ std::swap(mSize, other.mSize);
+ std::swap(mData, other.mData);
+ return *this;
+}
+
+namespace
+{
+static constexpr uint32_t kDefaultScratchBufferLifetime = 1000u;
+
+} // anonymous namespace
+
+// ScratchBuffer implementation.
+ScratchBuffer::ScratchBuffer() : ScratchBuffer(kDefaultScratchBufferLifetime) {}
+
+ScratchBuffer::ScratchBuffer(uint32_t lifetime) : mLifetime(lifetime), mResetCounter(lifetime) {}
+
+ScratchBuffer::~ScratchBuffer() {}
+
+ScratchBuffer::ScratchBuffer(ScratchBuffer &&other)
+{
+ *this = std::move(other);
+}
+
+ScratchBuffer &ScratchBuffer::operator=(ScratchBuffer &&other)
+{
+ std::swap(mLifetime, other.mLifetime);
+ std::swap(mResetCounter, other.mResetCounter);
+ std::swap(mScratchMemory, other.mScratchMemory);
+ return *this;
+}
+
+bool ScratchBuffer::get(size_t requestedSize, MemoryBuffer **memoryBufferOut)
+{
+ return getImpl(requestedSize, memoryBufferOut, Optional<uint8_t>::Invalid());
+}
+
+bool ScratchBuffer::getInitialized(size_t requestedSize,
+ MemoryBuffer **memoryBufferOut,
+ uint8_t initValue)
+{
+ return getImpl(requestedSize, memoryBufferOut, Optional<uint8_t>(initValue));
+}
+
+bool ScratchBuffer::getImpl(size_t requestedSize,
+ MemoryBuffer **memoryBufferOut,
+ Optional<uint8_t> initValue)
+{
+ if (mScratchMemory.size() == requestedSize)
+ {
+ mResetCounter = mLifetime;
+ *memoryBufferOut = &mScratchMemory;
+ return true;
+ }
+
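+ // The existing buffer is larger than requested: it can service the request as-is, but the
+ // oversized request counts against the buffer's lifetime so that repeated smaller requests
+ // eventually release the memory (see tick() and clear() below).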
+ if (mScratchMemory.size() > requestedSize)
+ {
+ tick();
+ }
+
+ if (mScratchMemory.size() < requestedSize)
+ {
+ if (!mScratchMemory.resize(requestedSize))
+ {
+ return false;
+ }
+ mResetCounter = mLifetime;
+ if (initValue.valid())
+ {
+ mScratchMemory.fill(initValue.value());
+ }
+ }
+
+ ASSERT(mScratchMemory.size() >= requestedSize);
+
+ *memoryBufferOut = &mScratchMemory;
+ return true;
+}
+
+void ScratchBuffer::tick()
+{
+ if (mResetCounter > 0)
+ {
+ --mResetCounter;
+ if (mResetCounter == 0)
+ {
+ clear();
+ }
+ }
+}
+
+void ScratchBuffer::clear()
+{
+ mResetCounter = mLifetime;
+ if (mScratchMemory.size() > 0)
+ {
+ mScratchMemory.clear();
+ }
+}
+
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/MemoryBuffer.h b/gfx/angle/checkout/src/common/MemoryBuffer.h
new file mode 100644
index 0000000000..bcd3aab7a9
--- /dev/null
+++ b/gfx/angle/checkout/src/common/MemoryBuffer.h
@@ -0,0 +1,93 @@
+//
+// Copyright 2014 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+#ifndef COMMON_MEMORYBUFFER_H_
+#define COMMON_MEMORYBUFFER_H_
+
+#include "common/Optional.h"
+#include "common/angleutils.h"
+#include "common/debug.h"
+
+#include <stdint.h>
+#include <cstddef>
+
+namespace angle
+{
+
+class MemoryBuffer final : NonCopyable
+{
+ public:
+ MemoryBuffer() = default;
+ ~MemoryBuffer();
+
+ MemoryBuffer(MemoryBuffer &&other);
+ MemoryBuffer &operator=(MemoryBuffer &&other);
+
+ [[nodiscard]] bool resize(size_t size);
+ void clear() { (void)resize(0); }
+ size_t size() const { return mSize; }
+ bool empty() const { return mSize == 0; }
+
+ const uint8_t *data() const { return mData; }
+ uint8_t *data()
+ {
+ ASSERT(mData);
+ return mData;
+ }
+
+ uint8_t &operator[](size_t pos)
+ {
+ ASSERT(pos < mSize);
+ return mData[pos];
+ }
+ const uint8_t &operator[](size_t pos) const
+ {
+ ASSERT(pos < mSize);
+ return mData[pos];
+ }
+
+ void fill(uint8_t datum);
+
+ private:
+ size_t mSize = 0;
+ uint8_t *mData = nullptr;
+};
+
+class ScratchBuffer final : NonCopyable
+{
+ public:
+ // If the scratch buffer is requested with a smaller size than its current allocation this
+ // many times in a row, it is released and recreated. This ensures we don't get stuck in a
+ // degenerate case where we keep hogging memory.
+ ScratchBuffer();
+ ScratchBuffer(uint32_t lifetime);
+ ~ScratchBuffer();
+
+ ScratchBuffer(ScratchBuffer &&other);
+ ScratchBuffer &operator=(ScratchBuffer &&other);
+
+ // Returns true with a memory buffer of the requested size, or false on failure.
+ bool get(size_t requestedSize, MemoryBuffer **memoryBufferOut);
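+ // Illustrative use (caller-side names such as scratch, copySize and source are hypothetical):
+ //   MemoryBuffer *staging = nullptr;
+ //   if (!scratch.get(copySize, &staging))
+ //       return false;
+ //   memcpy(staging->data(), source, copySize);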
+
+ // Same as get, but ensures new values are initialized to a fixed constant.
+ bool getInitialized(size_t requestedSize, MemoryBuffer **memoryBufferOut, uint8_t initValue);
+
+ // Ticks the release counter for the scratch buffer. Also done implicitly in get().
+ void tick();
+
+ void clear();
+
+ private:
+ bool getImpl(size_t requestedSize, MemoryBuffer **memoryBufferOut, Optional<uint8_t> initValue);
+
+ uint32_t mLifetime;
+ uint32_t mResetCounter;
+ MemoryBuffer mScratchMemory;
+};
+
+} // namespace angle
+
+#endif // COMMON_MEMORYBUFFER_H_
diff --git a/gfx/angle/checkout/src/common/Optional.h b/gfx/angle/checkout/src/common/Optional.h
new file mode 100644
index 0000000000..46c65dde4e
--- /dev/null
+++ b/gfx/angle/checkout/src/common/Optional.h
@@ -0,0 +1,74 @@
+//
+// Copyright 2015 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Optional.h:
+// Represents a type that may be invalid, similar to std::optional.
+//
+
+#ifndef COMMON_OPTIONAL_H_
+#define COMMON_OPTIONAL_H_
+
+#include <utility>
+
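+// A minimal usage sketch: a default-constructed Optional is invalid, assignment makes it
+// valid, and value() reads the stored value.
+//   Optional<int> maybeIndex;          // invalid
+//   maybeIndex = 3;                    // now valid
+//   if (maybeIndex.valid())
+//   {
+//       int index = maybeIndex.value();
+//   }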
+template <class T>
+struct Optional
+{
+ Optional() : mValid(false), mValue(T()) {}
+
+ Optional(const T &valueIn) : mValid(true), mValue(valueIn) {}
+
+ Optional(const Optional &other) : mValid(other.mValid), mValue(other.mValue) {}
+
+ Optional &operator=(const Optional &other)
+ {
+ this->mValid = other.mValid;
+ this->mValue = other.mValue;
+ return *this;
+ }
+
+ Optional &operator=(const T &value)
+ {
+ mValue = value;
+ mValid = true;
+ return *this;
+ }
+
+ Optional &operator=(T &&value)
+ {
+ mValue = std::move(value);
+ mValid = true;
+ return *this;
+ }
+
+ void reset() { mValid = false; }
+ T &&release()
+ {
+ mValid = false;
+ return std::move(mValue);
+ }
+
+ static Optional Invalid() { return Optional(); }
+
+ bool valid() const { return mValid; }
+ T &value() { return mValue; }
+ const T &value() const { return mValue; }
+
+ bool operator==(const Optional &other) const
+ {
+ return ((mValid == other.mValid) && (!mValid || (mValue == other.mValue)));
+ }
+
+ bool operator!=(const Optional &other) const { return !(*this == other); }
+
+ bool operator==(const T &value) const { return mValid && (mValue == value); }
+
+ bool operator!=(const T &value) const { return !(*this == value); }
+
+ private:
+ bool mValid;
+ T mValue;
+};
+
+#endif // COMMON_OPTIONAL_H_
diff --git a/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.cpp b/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.cpp
new file mode 100644
index 0000000000..738254bd18
--- /dev/null
+++ b/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.cpp
@@ -0,0 +1,452 @@
+// GENERATED FILE - DO NOT EDIT.
+// Generated by gen_packed_gl_enums.py using data from packed_egl_enums.json.
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// PackedEGLEnums_autogen.cpp:
+// Implements ANGLE-specific enums classes for EGLenums and functions operating
+// on them.
+
+#include "common/PackedEGLEnums_autogen.h"
+#include "common/debug.h"
+
+namespace egl
+{
+
+template <>
+ColorSpace FromEGLenum<ColorSpace>(EGLenum from)
+{
+ switch (from)
+ {
+ case EGL_COLORSPACE_sRGB:
+ return ColorSpace::sRGB;
+ case EGL_COLORSPACE_LINEAR:
+ return ColorSpace::Linear;
+ default:
+ return ColorSpace::InvalidEnum;
+ }
+}
+
+EGLenum ToEGLenum(ColorSpace from)
+{
+ switch (from)
+ {
+ case ColorSpace::sRGB:
+ return EGL_COLORSPACE_sRGB;
+ case ColorSpace::Linear:
+ return EGL_COLORSPACE_LINEAR;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, ColorSpace value)
+{
+ switch (value)
+ {
+ case ColorSpace::sRGB:
+ os << "EGL_COLORSPACE_sRGB";
+ break;
+ case ColorSpace::Linear:
+ os << "EGL_COLORSPACE_LINEAR";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+CompositorTiming FromEGLenum<CompositorTiming>(EGLenum from)
+{
+ switch (from)
+ {
+ case EGL_COMPOSITE_DEADLINE_ANDROID:
+ return CompositorTiming::CompositeDeadline;
+ case EGL_COMPOSITE_INTERVAL_ANDROID:
+ return CompositorTiming::CompositInterval;
+ case EGL_COMPOSITE_TO_PRESENT_LATENCY_ANDROID:
+ return CompositorTiming::CompositToPresentLatency;
+ default:
+ return CompositorTiming::InvalidEnum;
+ }
+}
+
+EGLenum ToEGLenum(CompositorTiming from)
+{
+ switch (from)
+ {
+ case CompositorTiming::CompositeDeadline:
+ return EGL_COMPOSITE_DEADLINE_ANDROID;
+ case CompositorTiming::CompositInterval:
+ return EGL_COMPOSITE_INTERVAL_ANDROID;
+ case CompositorTiming::CompositToPresentLatency:
+ return EGL_COMPOSITE_TO_PRESENT_LATENCY_ANDROID;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, CompositorTiming value)
+{
+ switch (value)
+ {
+ case CompositorTiming::CompositeDeadline:
+ os << "EGL_COMPOSITE_DEADLINE_ANDROID";
+ break;
+ case CompositorTiming::CompositInterval:
+ os << "EGL_COMPOSITE_INTERVAL_ANDROID";
+ break;
+ case CompositorTiming::CompositToPresentLatency:
+ os << "EGL_COMPOSITE_TO_PRESENT_LATENCY_ANDROID";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+ContextPriority FromEGLenum<ContextPriority>(EGLenum from)
+{
+ switch (from)
+ {
+ case EGL_CONTEXT_PRIORITY_LOW_IMG:
+ return ContextPriority::Low;
+ case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
+ return ContextPriority::Medium;
+ case EGL_CONTEXT_PRIORITY_HIGH_IMG:
+ return ContextPriority::High;
+ default:
+ return ContextPriority::InvalidEnum;
+ }
+}
+
+EGLenum ToEGLenum(ContextPriority from)
+{
+ switch (from)
+ {
+ case ContextPriority::Low:
+ return EGL_CONTEXT_PRIORITY_LOW_IMG;
+ case ContextPriority::Medium:
+ return EGL_CONTEXT_PRIORITY_MEDIUM_IMG;
+ case ContextPriority::High:
+ return EGL_CONTEXT_PRIORITY_HIGH_IMG;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, ContextPriority value)
+{
+ switch (value)
+ {
+ case ContextPriority::Low:
+ os << "EGL_CONTEXT_PRIORITY_LOW_IMG";
+ break;
+ case ContextPriority::Medium:
+ os << "EGL_CONTEXT_PRIORITY_MEDIUM_IMG";
+ break;
+ case ContextPriority::High:
+ os << "EGL_CONTEXT_PRIORITY_HIGH_IMG";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+MessageType FromEGLenum<MessageType>(EGLenum from)
+{
+ switch (from)
+ {
+ case EGL_DEBUG_MSG_CRITICAL_KHR:
+ return MessageType::Critical;
+ case EGL_DEBUG_MSG_ERROR_KHR:
+ return MessageType::Error;
+ case EGL_DEBUG_MSG_WARN_KHR:
+ return MessageType::Warn;
+ case EGL_DEBUG_MSG_INFO_KHR:
+ return MessageType::Info;
+ default:
+ return MessageType::InvalidEnum;
+ }
+}
+
+EGLenum ToEGLenum(MessageType from)
+{
+ switch (from)
+ {
+ case MessageType::Critical:
+ return EGL_DEBUG_MSG_CRITICAL_KHR;
+ case MessageType::Error:
+ return EGL_DEBUG_MSG_ERROR_KHR;
+ case MessageType::Warn:
+ return EGL_DEBUG_MSG_WARN_KHR;
+ case MessageType::Info:
+ return EGL_DEBUG_MSG_INFO_KHR;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, MessageType value)
+{
+ switch (value)
+ {
+ case MessageType::Critical:
+ os << "EGL_DEBUG_MSG_CRITICAL_KHR";
+ break;
+ case MessageType::Error:
+ os << "EGL_DEBUG_MSG_ERROR_KHR";
+ break;
+ case MessageType::Warn:
+ os << "EGL_DEBUG_MSG_WARN_KHR";
+ break;
+ case MessageType::Info:
+ os << "EGL_DEBUG_MSG_INFO_KHR";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+ObjectType FromEGLenum<ObjectType>(EGLenum from)
+{
+ switch (from)
+ {
+ case EGL_OBJECT_THREAD_KHR:
+ return ObjectType::Thread;
+ case EGL_OBJECT_DISPLAY_KHR:
+ return ObjectType::Display;
+ case EGL_OBJECT_CONTEXT_KHR:
+ return ObjectType::Context;
+ case EGL_OBJECT_SURFACE_KHR:
+ return ObjectType::Surface;
+ case EGL_OBJECT_IMAGE_KHR:
+ return ObjectType::Image;
+ case EGL_OBJECT_SYNC_KHR:
+ return ObjectType::Sync;
+ case EGL_OBJECT_STREAM_KHR:
+ return ObjectType::Stream;
+ default:
+ return ObjectType::InvalidEnum;
+ }
+}
+
+EGLenum ToEGLenum(ObjectType from)
+{
+ switch (from)
+ {
+ case ObjectType::Thread:
+ return EGL_OBJECT_THREAD_KHR;
+ case ObjectType::Display:
+ return EGL_OBJECT_DISPLAY_KHR;
+ case ObjectType::Context:
+ return EGL_OBJECT_CONTEXT_KHR;
+ case ObjectType::Surface:
+ return EGL_OBJECT_SURFACE_KHR;
+ case ObjectType::Image:
+ return EGL_OBJECT_IMAGE_KHR;
+ case ObjectType::Sync:
+ return EGL_OBJECT_SYNC_KHR;
+ case ObjectType::Stream:
+ return EGL_OBJECT_STREAM_KHR;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, ObjectType value)
+{
+ switch (value)
+ {
+ case ObjectType::Thread:
+ os << "EGL_OBJECT_THREAD_KHR";
+ break;
+ case ObjectType::Display:
+ os << "EGL_OBJECT_DISPLAY_KHR";
+ break;
+ case ObjectType::Context:
+ os << "EGL_OBJECT_CONTEXT_KHR";
+ break;
+ case ObjectType::Surface:
+ os << "EGL_OBJECT_SURFACE_KHR";
+ break;
+ case ObjectType::Image:
+ os << "EGL_OBJECT_IMAGE_KHR";
+ break;
+ case ObjectType::Sync:
+ os << "EGL_OBJECT_SYNC_KHR";
+ break;
+ case ObjectType::Stream:
+ os << "EGL_OBJECT_STREAM_KHR";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+TextureFormat FromEGLenum<TextureFormat>(EGLenum from)
+{
+ switch (from)
+ {
+ case EGL_NO_TEXTURE:
+ return TextureFormat::NoTexture;
+ case EGL_TEXTURE_RGB:
+ return TextureFormat::RGB;
+ case EGL_TEXTURE_RGBA:
+ return TextureFormat::RGBA;
+ default:
+ return TextureFormat::InvalidEnum;
+ }
+}
+
+EGLenum ToEGLenum(TextureFormat from)
+{
+ switch (from)
+ {
+ case TextureFormat::NoTexture:
+ return EGL_NO_TEXTURE;
+ case TextureFormat::RGB:
+ return EGL_TEXTURE_RGB;
+ case TextureFormat::RGBA:
+ return EGL_TEXTURE_RGBA;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, TextureFormat value)
+{
+ switch (value)
+ {
+ case TextureFormat::NoTexture:
+ os << "EGL_NO_TEXTURE";
+ break;
+ case TextureFormat::RGB:
+ os << "EGL_TEXTURE_RGB";
+ break;
+ case TextureFormat::RGBA:
+ os << "EGL_TEXTURE_RGBA";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+Timestamp FromEGLenum<Timestamp>(EGLenum from)
+{
+ switch (from)
+ {
+ case EGL_REQUESTED_PRESENT_TIME_ANDROID:
+ return Timestamp::RequestedPresentTime;
+ case EGL_RENDERING_COMPLETE_TIME_ANDROID:
+ return Timestamp::RenderingCompleteTime;
+ case EGL_COMPOSITION_LATCH_TIME_ANDROID:
+ return Timestamp::CompositionLatchTime;
+ case EGL_FIRST_COMPOSITION_START_TIME_ANDROID:
+ return Timestamp::FirstCompositionStartTime;
+ case EGL_LAST_COMPOSITION_START_TIME_ANDROID:
+ return Timestamp::LastCompositionStartTime;
+ case EGL_FIRST_COMPOSITION_GPU_FINISHED_TIME_ANDROID:
+ return Timestamp::FirstCompositionGPUFinishedTime;
+ case EGL_DISPLAY_PRESENT_TIME_ANDROID:
+ return Timestamp::DisplayPresentTime;
+ case EGL_DEQUEUE_READY_TIME_ANDROID:
+ return Timestamp::DequeueReadyTime;
+ case EGL_READS_DONE_TIME_ANDROID:
+ return Timestamp::ReadsDoneTime;
+ default:
+ return Timestamp::InvalidEnum;
+ }
+}
+
+EGLenum ToEGLenum(Timestamp from)
+{
+ switch (from)
+ {
+ case Timestamp::RequestedPresentTime:
+ return EGL_REQUESTED_PRESENT_TIME_ANDROID;
+ case Timestamp::RenderingCompleteTime:
+ return EGL_RENDERING_COMPLETE_TIME_ANDROID;
+ case Timestamp::CompositionLatchTime:
+ return EGL_COMPOSITION_LATCH_TIME_ANDROID;
+ case Timestamp::FirstCompositionStartTime:
+ return EGL_FIRST_COMPOSITION_START_TIME_ANDROID;
+ case Timestamp::LastCompositionStartTime:
+ return EGL_LAST_COMPOSITION_START_TIME_ANDROID;
+ case Timestamp::FirstCompositionGPUFinishedTime:
+ return EGL_FIRST_COMPOSITION_GPU_FINISHED_TIME_ANDROID;
+ case Timestamp::DisplayPresentTime:
+ return EGL_DISPLAY_PRESENT_TIME_ANDROID;
+ case Timestamp::DequeueReadyTime:
+ return EGL_DEQUEUE_READY_TIME_ANDROID;
+ case Timestamp::ReadsDoneTime:
+ return EGL_READS_DONE_TIME_ANDROID;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, Timestamp value)
+{
+ switch (value)
+ {
+ case Timestamp::RequestedPresentTime:
+ os << "EGL_REQUESTED_PRESENT_TIME_ANDROID";
+ break;
+ case Timestamp::RenderingCompleteTime:
+ os << "EGL_RENDERING_COMPLETE_TIME_ANDROID";
+ break;
+ case Timestamp::CompositionLatchTime:
+ os << "EGL_COMPOSITION_LATCH_TIME_ANDROID";
+ break;
+ case Timestamp::FirstCompositionStartTime:
+ os << "EGL_FIRST_COMPOSITION_START_TIME_ANDROID";
+ break;
+ case Timestamp::LastCompositionStartTime:
+ os << "EGL_LAST_COMPOSITION_START_TIME_ANDROID";
+ break;
+ case Timestamp::FirstCompositionGPUFinishedTime:
+ os << "EGL_FIRST_COMPOSITION_GPU_FINISHED_TIME_ANDROID";
+ break;
+ case Timestamp::DisplayPresentTime:
+ os << "EGL_DISPLAY_PRESENT_TIME_ANDROID";
+ break;
+ case Timestamp::DequeueReadyTime:
+ os << "EGL_DEQUEUE_READY_TIME_ANDROID";
+ break;
+ case Timestamp::ReadsDoneTime:
+ os << "EGL_READS_DONE_TIME_ANDROID";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+} // namespace egl
diff --git a/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.h b/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.h
new file mode 100644
index 0000000000..7794e8509e
--- /dev/null
+++ b/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.h
@@ -0,0 +1,144 @@
+// GENERATED FILE - DO NOT EDIT.
+// Generated by gen_packed_gl_enums.py using data from packed_egl_enums.json.
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// PackedEGLEnums_autogen.h:
+// Declares ANGLE-specific enums classes for EGLenums and functions operating
+// on them.
+
+#ifndef COMMON_PACKEDEGLENUMS_AUTOGEN_H_
+#define COMMON_PACKEDEGLENUMS_AUTOGEN_H_
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+
+#include <cstdint>
+#include <ostream>
+
+namespace egl
+{
+
+template <typename Enum>
+Enum FromEGLenum(EGLenum from);
+
+enum class ColorSpace : uint8_t
+{
+ sRGB = 0,
+ Linear = 1,
+
+ InvalidEnum = 2,
+ EnumCount = 2,
+};
+
+template <>
+ColorSpace FromEGLenum<ColorSpace>(EGLenum from);
+EGLenum ToEGLenum(ColorSpace from);
+std::ostream &operator<<(std::ostream &os, ColorSpace value);
+
+enum class CompositorTiming : uint8_t
+{
+ CompositeDeadline = 0,
+ CompositInterval = 1,
+ CompositToPresentLatency = 2,
+
+ InvalidEnum = 3,
+ EnumCount = 3,
+};
+
+template <>
+CompositorTiming FromEGLenum<CompositorTiming>(EGLenum from);
+EGLenum ToEGLenum(CompositorTiming from);
+std::ostream &operator<<(std::ostream &os, CompositorTiming value);
+
+enum class ContextPriority : uint8_t
+{
+ Low = 0,
+ Medium = 1,
+ High = 2,
+
+ InvalidEnum = 3,
+ EnumCount = 3,
+};
+
+template <>
+ContextPriority FromEGLenum<ContextPriority>(EGLenum from);
+EGLenum ToEGLenum(ContextPriority from);
+std::ostream &operator<<(std::ostream &os, ContextPriority value);
+
+enum class MessageType : uint8_t
+{
+ Critical = 0,
+ Error = 1,
+ Warn = 2,
+ Info = 3,
+
+ InvalidEnum = 4,
+ EnumCount = 4,
+};
+
+template <>
+MessageType FromEGLenum<MessageType>(EGLenum from);
+EGLenum ToEGLenum(MessageType from);
+std::ostream &operator<<(std::ostream &os, MessageType value);
+
+enum class ObjectType : uint8_t
+{
+ Thread = 0,
+ Display = 1,
+ Context = 2,
+ Surface = 3,
+ Image = 4,
+ Sync = 5,
+ Stream = 6,
+
+ InvalidEnum = 7,
+ EnumCount = 7,
+};
+
+template <>
+ObjectType FromEGLenum<ObjectType>(EGLenum from);
+EGLenum ToEGLenum(ObjectType from);
+std::ostream &operator<<(std::ostream &os, ObjectType value);
+
+enum class TextureFormat : uint8_t
+{
+ NoTexture = 0,
+ RGB = 1,
+ RGBA = 2,
+
+ InvalidEnum = 3,
+ EnumCount = 3,
+};
+
+template <>
+TextureFormat FromEGLenum<TextureFormat>(EGLenum from);
+EGLenum ToEGLenum(TextureFormat from);
+std::ostream &operator<<(std::ostream &os, TextureFormat value);
+
+enum class Timestamp : uint8_t
+{
+ RequestedPresentTime = 0,
+ RenderingCompleteTime = 1,
+ CompositionLatchTime = 2,
+ FirstCompositionStartTime = 3,
+ LastCompositionStartTime = 4,
+ FirstCompositionGPUFinishedTime = 5,
+ DisplayPresentTime = 6,
+ DequeueReadyTime = 7,
+ ReadsDoneTime = 8,
+
+ InvalidEnum = 9,
+ EnumCount = 9,
+};
+
+template <>
+Timestamp FromEGLenum<Timestamp>(EGLenum from);
+EGLenum ToEGLenum(Timestamp from);
+std::ostream &operator<<(std::ostream &os, Timestamp value);
+
+} // namespace egl
+
+#endif // COMMON_PACKEDEGLENUMS_AUTOGEN_H_
diff --git a/gfx/angle/checkout/src/common/PackedEnums.cpp b/gfx/angle/checkout/src/common/PackedEnums.cpp
new file mode 100644
index 0000000000..e13a502a42
--- /dev/null
+++ b/gfx/angle/checkout/src/common/PackedEnums.cpp
@@ -0,0 +1,673 @@
+// Copyright 2018 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// PackedEnums.cpp:
+// Implements ANGLE-specific enum classes for GLenums and functions operating
+// on them.
+
+#include "common/PackedEnums.h"
+
+#include "common/utilities.h"
+
+namespace gl
+{
+
+TextureType TextureTargetToType(TextureTarget target)
+{
+ switch (target)
+ {
+ case TextureTarget::CubeMapNegativeX:
+ case TextureTarget::CubeMapNegativeY:
+ case TextureTarget::CubeMapNegativeZ:
+ case TextureTarget::CubeMapPositiveX:
+ case TextureTarget::CubeMapPositiveY:
+ case TextureTarget::CubeMapPositiveZ:
+ return TextureType::CubeMap;
+ case TextureTarget::CubeMapArray:
+ return TextureType::CubeMapArray;
+ case TextureTarget::External:
+ return TextureType::External;
+ case TextureTarget::Rectangle:
+ return TextureType::Rectangle;
+ case TextureTarget::_2D:
+ return TextureType::_2D;
+ case TextureTarget::_2DArray:
+ return TextureType::_2DArray;
+ case TextureTarget::_2DMultisample:
+ return TextureType::_2DMultisample;
+ case TextureTarget::_2DMultisampleArray:
+ return TextureType::_2DMultisampleArray;
+ case TextureTarget::_3D:
+ return TextureType::_3D;
+ case TextureTarget::VideoImage:
+ return TextureType::VideoImage;
+ case TextureTarget::Buffer:
+ return TextureType::Buffer;
+ case TextureTarget::InvalidEnum:
+ return TextureType::InvalidEnum;
+ default:
+ UNREACHABLE();
+ return TextureType::InvalidEnum;
+ }
+}
+
+bool IsCubeMapFaceTarget(TextureTarget target)
+{
+ return TextureTargetToType(target) == TextureType::CubeMap;
+}
+
+TextureTarget NonCubeTextureTypeToTarget(TextureType type)
+{
+ switch (type)
+ {
+ case TextureType::External:
+ return TextureTarget::External;
+ case TextureType::Rectangle:
+ return TextureTarget::Rectangle;
+ case TextureType::_2D:
+ return TextureTarget::_2D;
+ case TextureType::_2DArray:
+ return TextureTarget::_2DArray;
+ case TextureType::_2DMultisample:
+ return TextureTarget::_2DMultisample;
+ case TextureType::_2DMultisampleArray:
+ return TextureTarget::_2DMultisampleArray;
+ case TextureType::_3D:
+ return TextureTarget::_3D;
+ case TextureType::CubeMapArray:
+ return TextureTarget::CubeMapArray;
+ case TextureType::VideoImage:
+ return TextureTarget::VideoImage;
+ case TextureType::Buffer:
+ return TextureTarget::Buffer;
+ default:
+ UNREACHABLE();
+ return TextureTarget::InvalidEnum;
+ }
+}
+
+// Check that we can do arithmetic on TextureTarget to convert from / to cube map faces
+static_assert(static_cast<uint8_t>(TextureTarget::CubeMapNegativeX) -
+ static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) ==
+ 1u,
+ "");
+static_assert(static_cast<uint8_t>(TextureTarget::CubeMapPositiveY) -
+ static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) ==
+ 2u,
+ "");
+static_assert(static_cast<uint8_t>(TextureTarget::CubeMapNegativeY) -
+ static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) ==
+ 3u,
+ "");
+static_assert(static_cast<uint8_t>(TextureTarget::CubeMapPositiveZ) -
+ static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) ==
+ 4u,
+ "");
+static_assert(static_cast<uint8_t>(TextureTarget::CubeMapNegativeZ) -
+ static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) ==
+ 5u,
+ "");
+
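+// Because the cube map face targets are laid out contiguously (per the static_asserts above),
+// a face index in 0..5 maps to CubeMapPositiveX..CubeMapNegativeZ by simple addition. For
+// example, CubeFaceIndexToTextureTarget(2) yields TextureTarget::CubeMapPositiveY, and
+// CubeMapTextureTargetToFaceIndex(TextureTarget::CubeMapPositiveY) yields 2.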
+TextureTarget CubeFaceIndexToTextureTarget(size_t face)
+{
+ ASSERT(face < 6u);
+ return static_cast<TextureTarget>(static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) + face);
+}
+
+size_t CubeMapTextureTargetToFaceIndex(TextureTarget target)
+{
+ ASSERT(IsCubeMapFaceTarget(target));
+ return static_cast<uint8_t>(target) - static_cast<uint8_t>(TextureTarget::CubeMapPositiveX);
+}
+
+TextureType SamplerTypeToTextureType(GLenum samplerType)
+{
+ switch (samplerType)
+ {
+ case GL_SAMPLER_2D:
+ case GL_INT_SAMPLER_2D:
+ case GL_UNSIGNED_INT_SAMPLER_2D:
+ case GL_SAMPLER_2D_SHADOW:
+ return TextureType::_2D;
+
+ case GL_SAMPLER_EXTERNAL_OES:
+ case GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT:
+ return TextureType::External;
+
+ case GL_SAMPLER_CUBE:
+ case GL_INT_SAMPLER_CUBE:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE:
+ case GL_SAMPLER_CUBE_SHADOW:
+ return TextureType::CubeMap;
+
+ case GL_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_INT_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW:
+ return TextureType::CubeMapArray;
+
+ case GL_SAMPLER_2D_ARRAY:
+ case GL_INT_SAMPLER_2D_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+ case GL_SAMPLER_2D_ARRAY_SHADOW:
+ return TextureType::_2DArray;
+
+ case GL_SAMPLER_3D:
+ case GL_INT_SAMPLER_3D:
+ case GL_UNSIGNED_INT_SAMPLER_3D:
+ return TextureType::_3D;
+
+ case GL_SAMPLER_2D_MULTISAMPLE:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+ return TextureType::_2DMultisample;
+
+ case GL_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ return TextureType::_2DMultisampleArray;
+
+ case GL_SAMPLER_BUFFER:
+ case GL_INT_SAMPLER_BUFFER:
+ case GL_UNSIGNED_INT_SAMPLER_BUFFER:
+ return TextureType::Buffer;
+
+ case GL_SAMPLER_2D_RECT_ANGLE:
+ return TextureType::Rectangle;
+
+ case GL_SAMPLER_VIDEO_IMAGE_WEBGL:
+ return TextureType::VideoImage;
+
+ default:
+ UNREACHABLE();
+ return TextureType::InvalidEnum;
+ }
+}
+
+TextureType ImageTypeToTextureType(GLenum imageType)
+{
+ switch (imageType)
+ {
+ case GL_IMAGE_2D:
+ case GL_INT_IMAGE_2D:
+ case GL_UNSIGNED_INT_IMAGE_2D:
+ return TextureType::_2D;
+
+ case GL_IMAGE_CUBE:
+ case GL_INT_IMAGE_CUBE:
+ case GL_UNSIGNED_INT_IMAGE_CUBE:
+ return TextureType::CubeMap;
+
+ case GL_IMAGE_CUBE_MAP_ARRAY:
+ case GL_INT_IMAGE_CUBE_MAP_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY:
+ return TextureType::CubeMapArray;
+
+ case GL_IMAGE_2D_ARRAY:
+ case GL_INT_IMAGE_2D_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+ return TextureType::_2DArray;
+
+ case GL_IMAGE_3D:
+ case GL_INT_IMAGE_3D:
+ case GL_UNSIGNED_INT_IMAGE_3D:
+ return TextureType::_3D;
+
+ case GL_IMAGE_BUFFER:
+ case GL_INT_IMAGE_BUFFER:
+ case GL_UNSIGNED_INT_IMAGE_BUFFER:
+ return TextureType::Buffer;
+
+ default:
+ UNREACHABLE();
+ return TextureType::InvalidEnum;
+ }
+}
+
+bool IsMultisampled(TextureType type)
+{
+ switch (type)
+ {
+ case TextureType::_2DMultisample:
+ case TextureType::_2DMultisampleArray:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool IsArrayTextureType(TextureType type)
+{
+ switch (type)
+ {
+ case TextureType::_2DArray:
+ case TextureType::_2DMultisampleArray:
+ case TextureType::CubeMapArray:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool IsStaticBufferUsage(BufferUsage usage)
+{
+ switch (usage)
+ {
+ case BufferUsage::StaticCopy:
+ case BufferUsage::StaticDraw:
+ case BufferUsage::StaticRead:
+ return true;
+ default:
+ return false;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, PrimitiveMode value)
+{
+ switch (value)
+ {
+ case PrimitiveMode::LineLoop:
+ os << "GL_LINE_LOOP";
+ break;
+ case PrimitiveMode::Lines:
+ os << "GL_LINES";
+ break;
+ case PrimitiveMode::LinesAdjacency:
+ os << "GL_LINES_ADJACENCY";
+ break;
+ case PrimitiveMode::LineStrip:
+ os << "GL_LINE_STRIP";
+ break;
+ case PrimitiveMode::LineStripAdjacency:
+ os << "GL_LINE_STRIP_ADJANCENCY";
+ break;
+ case PrimitiveMode::Patches:
+ os << "GL_PATCHES";
+ break;
+ case PrimitiveMode::Points:
+ os << "GL_POINTS";
+ break;
+ case PrimitiveMode::TriangleFan:
+ os << "GL_TRIANGLE_FAN";
+ break;
+ case PrimitiveMode::Triangles:
+ os << "GL_TRIANGLES";
+ break;
+ case PrimitiveMode::TrianglesAdjacency:
+ os << "GL_TRIANGLES_ADJANCENCY";
+ break;
+ case PrimitiveMode::TriangleStrip:
+ os << "GL_TRIANGLE_STRIP";
+ break;
+ case PrimitiveMode::TriangleStripAdjacency:
+ os << "GL_TRIANGLE_STRIP_ADJACENCY";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+std::ostream &operator<<(std::ostream &os, DrawElementsType value)
+{
+ switch (value)
+ {
+ case DrawElementsType::UnsignedByte:
+ os << "GL_UNSIGNED_BYTE";
+ break;
+ case DrawElementsType::UnsignedShort:
+ os << "GL_UNSIGNED_SHORT";
+ break;
+ case DrawElementsType::UnsignedInt:
+ os << "GL_UNSIGNED_INT";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+
+ return os;
+}
+
+std::ostream &operator<<(std::ostream &os, BlendEquationType value)
+{
+ switch (value)
+ {
+ case BlendEquationType::Add:
+ os << "GL_FUNC_ADD";
+ break;
+ case BlendEquationType::Min:
+ os << "GL_MIN";
+ break;
+ case BlendEquationType::Max:
+ os << "GL_MAX";
+ break;
+ case BlendEquationType::Subtract:
+ os << "GL_FUNC_SUBTRACT";
+ break;
+ case BlendEquationType::ReverseSubtract:
+ os << "GL_FUNC_REVERSE_SUBTRACT";
+ break;
+ case BlendEquationType::Multiply:
+ os << "GL_MULTIPLY_KHR";
+ break;
+ case BlendEquationType::Screen:
+ os << "GL_SCREEN_KHR";
+ break;
+ case BlendEquationType::Overlay:
+ os << "GL_OVERLAY_KHR";
+ break;
+ case BlendEquationType::Darken:
+ os << "GL_DARKEN_KHR";
+ break;
+ case BlendEquationType::Lighten:
+ os << "GL_LIGHTEN_KHR";
+ break;
+ case BlendEquationType::Colordodge:
+ os << "GL_COLORDODGE_KHR";
+ break;
+ case BlendEquationType::Colorburn:
+ os << "GL_COLORBURN_KHR";
+ break;
+ case BlendEquationType::Hardlight:
+ os << "GL_HARDLIGHT_KHR";
+ break;
+ case BlendEquationType::Softlight:
+ os << "GL_SOFTLIGHT_KHR";
+ break;
+ case BlendEquationType::Difference:
+ os << "GL_DIFFERENCE_KHR";
+ break;
+ case BlendEquationType::Exclusion:
+ os << "GL_EXCLUSION_KHR";
+ break;
+ case BlendEquationType::HslHue:
+ os << "GL_HSL_HUE_KHR";
+ break;
+ case BlendEquationType::HslSaturation:
+ os << "GL_HSL_SATURATION_KHR";
+ break;
+ case BlendEquationType::HslColor:
+ os << "GL_HSL_COLOR_KHR";
+ break;
+ case BlendEquationType::HslLuminosity:
+ os << "GL_HSL_LUMINOSITY_KHR";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+
+ return os;
+}
+
+std::ostream &operator<<(std::ostream &os, BlendFactorType value)
+{
+ switch (value)
+ {
+ case BlendFactorType::Zero:
+ os << "GL_ZERO";
+ break;
+ case BlendFactorType::One:
+ os << "GL_ONE";
+ break;
+ case BlendFactorType::SrcColor:
+ os << "GL_SRC_COLOR";
+ break;
+ case BlendFactorType::OneMinusSrcColor:
+ os << "GL_ONE_MINUS_SRC_COLOR";
+ break;
+ case BlendFactorType::SrcAlpha:
+ os << "GL_SRC_ALPHA";
+ break;
+ case BlendFactorType::OneMinusSrcAlpha:
+ os << "GL_ONE_MINUS_SRC_ALPHA";
+ break;
+ case BlendFactorType::DstAlpha:
+ os << "GL_DST_ALPHA";
+ break;
+ case BlendFactorType::OneMinusDstAlpha:
+ os << "GL_ONE_MINUS_DST_ALPHA";
+ break;
+ case BlendFactorType::DstColor:
+ os << "GL_DST_COLOR";
+ break;
+ case BlendFactorType::OneMinusDstColor:
+ os << "GL_ONE_MINUS_DST_COLOR";
+ break;
+ case BlendFactorType::SrcAlphaSaturate:
+ os << "GL_SRC_ALPHA_SATURATE";
+ break;
+ case BlendFactorType::ConstantColor:
+ os << "GL_CONSTANT_COLOR";
+ break;
+ case BlendFactorType::OneMinusConstantColor:
+ os << "GL_ONE_MINUS_CONSTANT_COLOR";
+ break;
+ case BlendFactorType::ConstantAlpha:
+ os << "GL_CONSTANT_ALPHA";
+ break;
+ case BlendFactorType::OneMinusConstantAlpha:
+ os << "GL_ONE_MINUS_CONSTANT_ALPHA";
+ break;
+ case BlendFactorType::Src1Alpha:
+ os << "GL_SRC1_ALPHA_EXT";
+ break;
+ case BlendFactorType::Src1Color:
+ os << "GL_SRC1_COLOR_EXT";
+ break;
+ case BlendFactorType::OneMinusSrc1Color:
+ os << "GL_ONE_MINUS_SRC1_COLOR_EXT";
+ break;
+ case BlendFactorType::OneMinusSrc1Alpha:
+ os << "GL_ONE_MINUS_SRC1_ALPHA_EXT";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+
+ return os;
+}
+
+std::ostream &operator<<(std::ostream &os, VertexAttribType value)
+{
+ switch (value)
+ {
+ case VertexAttribType::Byte:
+ os << "GL_BYTE";
+ break;
+ case VertexAttribType::Fixed:
+ os << "GL_FIXED";
+ break;
+ case VertexAttribType::Float:
+ os << "GL_FLOAT";
+ break;
+ case VertexAttribType::HalfFloat:
+ os << "GL_HALF_FLOAT";
+ break;
+ case VertexAttribType::HalfFloatOES:
+ os << "GL_HALF_FLOAT_OES";
+ break;
+ case VertexAttribType::Int:
+ os << "GL_INT";
+ break;
+ case VertexAttribType::Int2101010:
+ os << "GL_INT_2_10_10_10_REV";
+ break;
+ case VertexAttribType::Int1010102:
+ os << "GL_INT_10_10_10_2_OES";
+ break;
+ case VertexAttribType::Short:
+ os << "GL_SHORT";
+ break;
+ case VertexAttribType::UnsignedByte:
+ os << "GL_UNSIGNED_BYTE";
+ break;
+ case VertexAttribType::UnsignedInt:
+ os << "GL_UNSIGNED_INT";
+ break;
+ case VertexAttribType::UnsignedInt2101010:
+ os << "GL_UNSIGNED_INT_2_10_10_10_REV";
+ break;
+ case VertexAttribType::UnsignedInt1010102:
+ os << "GL_UNSIGNED_INT_10_10_10_2_OES";
+ break;
+ case VertexAttribType::UnsignedShort:
+ os << "GL_UNSIGNED_SHORT";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+std::ostream &operator<<(std::ostream &os, TessEvaluationType value)
+{
+ switch (value)
+ {
+ case TessEvaluationType::Triangles:
+ os << "GL_TRIANGLES";
+ break;
+ case TessEvaluationType::Quads:
+ os << "GL_QUADS";
+ break;
+ case TessEvaluationType::Isolines:
+ os << "GL_ISOLINES";
+ break;
+ case TessEvaluationType::EqualSpacing:
+ os << "GL_EQUAL";
+ break;
+ case TessEvaluationType::FractionalEvenSpacing:
+ os << "GL_FRACTIONAL_EVEN";
+ break;
+ case TessEvaluationType::FractionalOddSpacing:
+ os << "GL_FRACTIONAL_ODD";
+ break;
+ case TessEvaluationType::Cw:
+ os << "GL_CW";
+ break;
+ case TessEvaluationType::Ccw:
+ os << "GL_CCW";
+ break;
+ case TessEvaluationType::PointMode:
+ os << "GL_TESS_GEN_POINT_MODE";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+const char *ShaderTypeToString(ShaderType shaderType)
+{
+ constexpr ShaderMap<const char *> kShaderTypeNameMap = {
+ {ShaderType::Vertex, "Vertex"},
+ {ShaderType::TessControl, "Tessellation control"},
+ {ShaderType::TessEvaluation, "Tessellation evaluation"},
+ {ShaderType::Geometry, "Geometry"},
+ {ShaderType::Fragment, "Fragment"},
+ {ShaderType::Compute, "Compute"}};
+ return kShaderTypeNameMap[shaderType];
+}
+
+bool operator<(const UniformLocation &lhs, const UniformLocation &rhs)
+{
+ return lhs.value < rhs.value;
+}
+
+bool IsEmulatedCompressedFormat(GLenum format)
+{
+ // TODO(anglebug.com/6177): Check for all formats ANGLE will use to emulate a compressed texture
+ return format == GL_RGBA || format == GL_RG || format == GL_RED;
+}
+} // namespace gl
+
+namespace egl
+{
+MessageType ErrorCodeToMessageType(EGLint errorCode)
+{
+ switch (errorCode)
+ {
+ case EGL_BAD_ALLOC:
+ case EGL_CONTEXT_LOST:
+ case EGL_NOT_INITIALIZED:
+ return MessageType::Critical;
+
+ case EGL_BAD_ACCESS:
+ case EGL_BAD_ATTRIBUTE:
+ case EGL_BAD_CONFIG:
+ case EGL_BAD_CONTEXT:
+ case EGL_BAD_CURRENT_SURFACE:
+ case EGL_BAD_DISPLAY:
+ case EGL_BAD_MATCH:
+ case EGL_BAD_NATIVE_PIXMAP:
+ case EGL_BAD_NATIVE_WINDOW:
+ case EGL_BAD_PARAMETER:
+ case EGL_BAD_SURFACE:
+ case EGL_BAD_STREAM_KHR:
+ case EGL_BAD_STATE_KHR:
+ case EGL_BAD_DEVICE_EXT:
+ return MessageType::Error;
+
+ case EGL_SUCCESS:
+ default:
+ UNREACHABLE();
+ return MessageType::InvalidEnum;
+ }
+}
+} // namespace egl
+
+namespace egl_gl
+{
+
+gl::TextureTarget EGLCubeMapTargetToCubeMapTarget(EGLenum eglTarget)
+{
+ ASSERT(egl::IsCubeMapTextureTarget(eglTarget));
+ return gl::CubeFaceIndexToTextureTarget(egl::CubeMapTextureTargetToLayerIndex(eglTarget));
+}
+
+gl::TextureTarget EGLImageTargetToTextureTarget(EGLenum eglTarget)
+{
+ switch (eglTarget)
+ {
+ case EGL_GL_TEXTURE_2D_KHR:
+ return gl::TextureTarget::_2D;
+
+ case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR:
+ case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X_KHR:
+ case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y_KHR:
+ case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_KHR:
+ case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z_KHR:
+ case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_KHR:
+ return EGLCubeMapTargetToCubeMapTarget(eglTarget);
+
+ case EGL_GL_TEXTURE_3D_KHR:
+ return gl::TextureTarget::_3D;
+
+ default:
+ UNREACHABLE();
+ return gl::TextureTarget::InvalidEnum;
+ }
+}
+
+gl::TextureType EGLTextureTargetToTextureType(EGLenum eglTarget)
+{
+ switch (eglTarget)
+ {
+ case EGL_TEXTURE_2D:
+ return gl::TextureType::_2D;
+
+ case EGL_TEXTURE_RECTANGLE_ANGLE:
+ return gl::TextureType::Rectangle;
+
+ default:
+ UNREACHABLE();
+ return gl::TextureType::InvalidEnum;
+ }
+}
+} // namespace egl_gl
diff --git a/gfx/angle/checkout/src/common/PackedEnums.h b/gfx/angle/checkout/src/common/PackedEnums.h
new file mode 100644
index 0000000000..81fa6d6f42
--- /dev/null
+++ b/gfx/angle/checkout/src/common/PackedEnums.h
@@ -0,0 +1,859 @@
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// PackedEnums.h:
+//   Declares ANGLE-specific enum classes for GLenums and functions operating
+//   on them.
+
+#ifndef COMMON_PACKEDGLENUMS_H_
+#define COMMON_PACKEDGLENUMS_H_
+
+#include "common/PackedEGLEnums_autogen.h"
+#include "common/PackedGLEnums_autogen.h"
+
+#include <array>
+#include <bitset>
+#include <cstddef>
+
+#include <EGL/egl.h>
+
+#include "common/bitset_utils.h"
+
+namespace angle
+{
+
+// Return the number of elements of a packed enum, including the InvalidEnum element.
+template <typename E>
+constexpr size_t EnumSize()
+{
+ using UnderlyingType = typename std::underlying_type<E>::type;
+ return static_cast<UnderlyingType>(E::EnumCount);
+}
+
+// Implementation of AllEnums, which allows iterating over all the possible values of a packed enum
+// like so:
+// for (auto value : AllEnums<MyPackedEnum>()) {
+// // Do something with the enum.
+// }
+
+template <typename E>
+class EnumIterator final
+{
+ private:
+ using UnderlyingType = typename std::underlying_type<E>::type;
+
+ public:
+ EnumIterator(E value) : mValue(static_cast<UnderlyingType>(value)) {}
+ EnumIterator &operator++()
+ {
+ mValue++;
+ return *this;
+ }
+ bool operator==(const EnumIterator &other) const { return mValue == other.mValue; }
+ bool operator!=(const EnumIterator &other) const { return mValue != other.mValue; }
+ E operator*() const { return static_cast<E>(mValue); }
+
+ private:
+ UnderlyingType mValue;
+};
+
+template <typename E, size_t MaxSize = EnumSize<E>()>
+struct AllEnums
+{
+ EnumIterator<E> begin() const { return {static_cast<E>(0)}; }
+ EnumIterator<E> end() const { return {static_cast<E>(MaxSize)}; }
+};
+
+// PackedEnumMap<E, T> is like an std::array<T, E::EnumCount> but is indexed with enum values. It
+// implements all of the std::array interface except with enum values instead of indices.
+template <typename E, typename T, size_t MaxSize = EnumSize<E>()>
+class PackedEnumMap
+{
+ using UnderlyingType = typename std::underlying_type<E>::type;
+ using Storage = std::array<T, MaxSize>;
+
+ public:
+ using InitPair = std::pair<E, T>;
+
+ constexpr PackedEnumMap() = default;
+
+ constexpr PackedEnumMap(std::initializer_list<InitPair> init) : mPrivateData{}
+ {
+ // We use a for loop instead of range-for to work around a limitation in MSVC.
+ for (const InitPair *it = init.begin(); it != init.end(); ++it)
+ {
+ mPrivateData[static_cast<UnderlyingType>(it->first)] = it->second;
+ }
+ }
+
+ // types:
+ using value_type = T;
+ using pointer = T *;
+ using const_pointer = const T *;
+ using reference = T &;
+ using const_reference = const T &;
+
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+
+ using iterator = typename Storage::iterator;
+ using const_iterator = typename Storage::const_iterator;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ // No explicit construct/copy/destroy for aggregate type
+ void fill(const T &u) { mPrivateData.fill(u); }
+ void swap(PackedEnumMap<E, T, MaxSize> &a) noexcept { mPrivateData.swap(a.mPrivateData); }
+
+ // iterators:
+ iterator begin() noexcept { return mPrivateData.begin(); }
+ const_iterator begin() const noexcept { return mPrivateData.begin(); }
+ iterator end() noexcept { return mPrivateData.end(); }
+ const_iterator end() const noexcept { return mPrivateData.end(); }
+
+ reverse_iterator rbegin() noexcept { return mPrivateData.rbegin(); }
+ const_reverse_iterator rbegin() const noexcept { return mPrivateData.rbegin(); }
+ reverse_iterator rend() noexcept { return mPrivateData.rend(); }
+ const_reverse_iterator rend() const noexcept { return mPrivateData.rend(); }
+
+ // capacity:
+ constexpr size_type size() const noexcept { return mPrivateData.size(); }
+ constexpr size_type max_size() const noexcept { return mPrivateData.max_size(); }
+ constexpr bool empty() const noexcept { return mPrivateData.empty(); }
+
+ // element access:
+ reference operator[](E n)
+ {
+ ASSERT(static_cast<size_t>(n) < mPrivateData.size());
+ return mPrivateData[static_cast<UnderlyingType>(n)];
+ }
+
+ constexpr const_reference operator[](E n) const
+ {
+ ASSERT(static_cast<size_t>(n) < mPrivateData.size());
+ return mPrivateData[static_cast<UnderlyingType>(n)];
+ }
+
+ const_reference at(E n) const { return mPrivateData.at(static_cast<UnderlyingType>(n)); }
+ reference at(E n) { return mPrivateData.at(static_cast<UnderlyingType>(n)); }
+
+ reference front() { return mPrivateData.front(); }
+ const_reference front() const { return mPrivateData.front(); }
+ reference back() { return mPrivateData.back(); }
+ const_reference back() const { return mPrivateData.back(); }
+
+ T *data() noexcept { return mPrivateData.data(); }
+ const T *data() const noexcept { return mPrivateData.data(); }
+
+ bool operator==(const PackedEnumMap &rhs) const { return mPrivateData == rhs.mPrivateData; }
+ bool operator!=(const PackedEnumMap &rhs) const { return mPrivateData != rhs.mPrivateData; }
+
+ template <typename SubT = T>
+ typename std::enable_if<std::is_integral<SubT>::value>::type operator+=(
+ const PackedEnumMap<E, SubT, MaxSize> &rhs)
+ {
+ for (E e : AllEnums<E, MaxSize>())
+ {
+ at(e) += rhs[e];
+ }
+ }
+
+ private:
+ Storage mPrivateData;
+};
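+
+// Example usage (illustrative sketch): a PackedEnumMap is constructed from
+// {enum, value} pairs and indexed directly with enum values, for instance with
+// gl::ShaderType from PackedGLEnums_autogen.h:
+//
+//     PackedEnumMap<gl::ShaderType, int> counts = {{gl::ShaderType::Vertex, 1},
+//                                                  {gl::ShaderType::Fragment, 2}};
+//     counts[gl::ShaderType::Geometry] = 3;
+//     int total = 0;
+//     for (gl::ShaderType type : AllEnums<gl::ShaderType>())
+//     {
+//         total += counts[type];
+//     }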
+
+// PackedEnumBitSet<E> is like an std::bitset<E::EnumCount> but is indexed with enum values. It
+// implements the std::bitset interface except with enum values instead of indices.
+template <typename E, typename DataT = uint32_t>
+using PackedEnumBitSet = BitSetT<EnumSize<E>(), DataT, E>;
+
+} // namespace angle
+
+namespace gl
+{
+
+TextureType TextureTargetToType(TextureTarget target);
+TextureTarget NonCubeTextureTypeToTarget(TextureType type);
+
+TextureTarget CubeFaceIndexToTextureTarget(size_t face);
+size_t CubeMapTextureTargetToFaceIndex(TextureTarget target);
+bool IsCubeMapFaceTarget(TextureTarget target);
+
+constexpr TextureTarget kCubeMapTextureTargetMin = TextureTarget::CubeMapPositiveX;
+constexpr TextureTarget kCubeMapTextureTargetMax = TextureTarget::CubeMapNegativeZ;
+constexpr TextureTarget kAfterCubeMapTextureTargetMax =
+ static_cast<TextureTarget>(static_cast<uint8_t>(kCubeMapTextureTargetMax) + 1);
+struct AllCubeFaceTextureTargets
+{
+ angle::EnumIterator<TextureTarget> begin() const { return kCubeMapTextureTargetMin; }
+ angle::EnumIterator<TextureTarget> end() const { return kAfterCubeMapTextureTargetMax; }
+};
+
+constexpr std::array<ShaderType, 2> kAllGLES2ShaderTypes = {ShaderType::Vertex,
+ ShaderType::Fragment};
+
+constexpr ShaderType kShaderTypeMin = ShaderType::Vertex;
+constexpr ShaderType kShaderTypeMax = ShaderType::Compute;
+constexpr ShaderType kAfterShaderTypeMax =
+ static_cast<ShaderType>(static_cast<uint8_t>(kShaderTypeMax) + 1);
+struct AllShaderTypes
+{
+ angle::EnumIterator<ShaderType> begin() const { return kShaderTypeMin; }
+ angle::EnumIterator<ShaderType> end() const { return kAfterShaderTypeMax; }
+};
+
+constexpr size_t kGraphicsShaderCount = static_cast<size_t>(ShaderType::EnumCount) - 1u;
+// Arrange the shader types in the order of the rendering pipeline
+constexpr std::array<ShaderType, kGraphicsShaderCount> kAllGraphicsShaderTypes = {
+ ShaderType::Vertex, ShaderType::TessControl, ShaderType::TessEvaluation, ShaderType::Geometry,
+ ShaderType::Fragment};
+
+using ShaderBitSet = angle::PackedEnumBitSet<ShaderType, uint8_t>;
+static_assert(sizeof(ShaderBitSet) == sizeof(uint8_t), "Unexpected size");
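+
+// Example usage (illustrative sketch, assuming the BitSetT interface from
+// common/bitset_utils.h):
+//
+//     ShaderBitSet linkedStages;
+//     linkedStages.set(ShaderType::Vertex);
+//     linkedStages.set(ShaderType::Fragment);
+//     bool hasVertexStage = linkedStages[ShaderType::Vertex];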
+
+template <typename T>
+using ShaderMap = angle::PackedEnumMap<ShaderType, T>;
+
+const char *ShaderTypeToString(ShaderType shaderType);
+
+TextureType SamplerTypeToTextureType(GLenum samplerType);
+TextureType ImageTypeToTextureType(GLenum imageType);
+
+bool IsMultisampled(gl::TextureType type);
+bool IsArrayTextureType(gl::TextureType type);
+
+bool IsStaticBufferUsage(BufferUsage usage);
+
+enum class PrimitiveMode : uint8_t
+{
+ Points = 0x0,
+ Lines = 0x1,
+ LineLoop = 0x2,
+ LineStrip = 0x3,
+ Triangles = 0x4,
+ TriangleStrip = 0x5,
+ TriangleFan = 0x6,
+ Unused1 = 0x7,
+ Unused2 = 0x8,
+ Unused3 = 0x9,
+ LinesAdjacency = 0xA,
+ LineStripAdjacency = 0xB,
+ TrianglesAdjacency = 0xC,
+ TriangleStripAdjacency = 0xD,
+ Patches = 0xE,
+
+ InvalidEnum = 0xF,
+ EnumCount = 0xF,
+};
+
+template <>
+constexpr PrimitiveMode FromGLenum<PrimitiveMode>(GLenum from)
+{
+ if (from >= static_cast<GLenum>(PrimitiveMode::EnumCount))
+ {
+ return PrimitiveMode::InvalidEnum;
+ }
+
+ return static_cast<PrimitiveMode>(from);
+}
+
+constexpr GLenum ToGLenum(PrimitiveMode from)
+{
+ return static_cast<GLenum>(from);
+}
+
+static_assert(ToGLenum(PrimitiveMode::Points) == GL_POINTS, "PrimitiveMode violation");
+static_assert(ToGLenum(PrimitiveMode::Lines) == GL_LINES, "PrimitiveMode violation");
+static_assert(ToGLenum(PrimitiveMode::LineLoop) == GL_LINE_LOOP, "PrimitiveMode violation");
+static_assert(ToGLenum(PrimitiveMode::LineStrip) == GL_LINE_STRIP, "PrimitiveMode violation");
+static_assert(ToGLenum(PrimitiveMode::Triangles) == GL_TRIANGLES, "PrimitiveMode violation");
+static_assert(ToGLenum(PrimitiveMode::TriangleStrip) == GL_TRIANGLE_STRIP,
+ "PrimitiveMode violation");
+static_assert(ToGLenum(PrimitiveMode::TriangleFan) == GL_TRIANGLE_FAN, "PrimitiveMode violation");
+static_assert(ToGLenum(PrimitiveMode::LinesAdjacency) == GL_LINES_ADJACENCY,
+ "PrimitiveMode violation");
+static_assert(ToGLenum(PrimitiveMode::LineStripAdjacency) == GL_LINE_STRIP_ADJACENCY,
+ "PrimitiveMode violation");
+static_assert(ToGLenum(PrimitiveMode::TrianglesAdjacency) == GL_TRIANGLES_ADJACENCY,
+ "PrimitiveMode violation");
+static_assert(ToGLenum(PrimitiveMode::TriangleStripAdjacency) == GL_TRIANGLE_STRIP_ADJACENCY,
+ "PrimitiveMode violation");
+
+std::ostream &operator<<(std::ostream &os, PrimitiveMode value);
+
+enum class DrawElementsType : size_t
+{
+ UnsignedByte = 0,
+ UnsignedShort = 1,
+ UnsignedInt = 2,
+ InvalidEnum = 3,
+ EnumCount = 3,
+};
+
+template <>
+constexpr DrawElementsType FromGLenum<DrawElementsType>(GLenum from)
+{
+
+ GLenum scaled = (from - GL_UNSIGNED_BYTE);
+    // This code sequence generates a ROR instruction on x86/arm. We want to check if the lowest bit
+    // of scaled is set and if (scaled >> 1) is greater than a non-power-of-two value. If we rotate
+    // the lowest bit to the highest bit, both conditions can be checked with a single test.
+ static_assert(sizeof(GLenum) == 4, "Update (scaled << 31) to sizeof(GLenum) * 8 - 1");
+ GLenum packed = (scaled >> 1) | (scaled << 31);
+
+    // The ternary operator with a simple assignment usually translates to a cmov instruction and
+    // thus avoids a branch.
+ packed = (packed >= static_cast<GLenum>(DrawElementsType::EnumCount))
+ ? static_cast<GLenum>(DrawElementsType::InvalidEnum)
+ : packed;
+
+ return static_cast<DrawElementsType>(packed);
+}
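+
+// Worked example (illustrative): GL_UNSIGNED_SHORT is 0x1403, so scaled is
+// 0x1403 - 0x1401 = 2 and packed is (2 >> 1) | (2 << 31) = 1, i.e.
+// DrawElementsType::UnsignedShort. For an odd offset such as GL_SHORT (0x1402),
+// scaled is 1 and the rotated low bit makes packed 0x80000000, which is >=
+// EnumCount and therefore yields InvalidEnum.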
+
+constexpr GLenum ToGLenum(DrawElementsType from)
+{
+ return ((static_cast<GLenum>(from) << 1) + GL_UNSIGNED_BYTE);
+}
+
+#define ANGLE_VALIDATE_PACKED_ENUM(type, packed, glenum) \
+ static_assert(ToGLenum(type::packed) == glenum, #type " violation"); \
+ static_assert(FromGLenum<type>(glenum) == type::packed, #type " violation")
+
+ANGLE_VALIDATE_PACKED_ENUM(DrawElementsType, UnsignedByte, GL_UNSIGNED_BYTE);
+ANGLE_VALIDATE_PACKED_ENUM(DrawElementsType, UnsignedShort, GL_UNSIGNED_SHORT);
+ANGLE_VALIDATE_PACKED_ENUM(DrawElementsType, UnsignedInt, GL_UNSIGNED_INT);
+
+std::ostream &operator<<(std::ostream &os, DrawElementsType value);
+
+enum class BlendEquationType
+{
+ Add = 0, // GLenum == 0x8006
+ Min = 1, // GLenum == 0x8007
+ Max = 2, // GLenum == 0x8008
+ Unused = 3,
+ Subtract = 4, // GLenum == 0x800A
+ ReverseSubtract = 5, // GLenum == 0x800B
+
+ Multiply = 6, // GLenum == 0x9294
+ Screen = 7, // GLenum == 0x9295
+ Overlay = 8, // GLenum == 0x9296
+ Darken = 9, // GLenum == 0x9297
+ Lighten = 10, // GLenum == 0x9298
+ Colordodge = 11, // GLenum == 0x9299
+ Colorburn = 12, // GLenum == 0x929A
+ Hardlight = 13, // GLenum == 0x929B
+ Softlight = 14, // GLenum == 0x929C
+ Unused2 = 15,
+ Difference = 16, // GLenum == 0x929E
+ Unused3 = 17,
+ Exclusion = 18, // GLenum == 0x92A0
+
+ HslHue = 19, // GLenum == 0x92AD
+ HslSaturation = 20, // GLenum == 0x92AE
+ HslColor = 21, // GLenum == 0x92AF
+ HslLuminosity = 22, // GLenum == 0x92B0
+
+ InvalidEnum = 23,
+ EnumCount = InvalidEnum
+};
+
+using BlendEquationBitSet = angle::PackedEnumBitSet<gl::BlendEquationType>;
+
+template <>
+constexpr BlendEquationType FromGLenum<BlendEquationType>(GLenum from)
+{
+ if (from <= GL_FUNC_REVERSE_SUBTRACT)
+ {
+ const GLenum scaled = (from - GL_FUNC_ADD);
+ return (scaled == static_cast<GLenum>(BlendEquationType::Unused))
+ ? BlendEquationType::InvalidEnum
+ : static_cast<BlendEquationType>(scaled);
+ }
+ if (from <= GL_EXCLUSION_KHR)
+ {
+ const GLenum scaled =
+ (from - GL_MULTIPLY_KHR + static_cast<uint32_t>(BlendEquationType::Multiply));
+ return (scaled == static_cast<GLenum>(BlendEquationType::Unused2) ||
+ scaled == static_cast<GLenum>(BlendEquationType::Unused3))
+ ? BlendEquationType::InvalidEnum
+ : static_cast<BlendEquationType>(scaled);
+ }
+ if (from <= GL_HSL_LUMINOSITY_KHR)
+ {
+ return static_cast<BlendEquationType>(from - GL_HSL_HUE_KHR +
+ static_cast<uint32_t>(BlendEquationType::HslHue));
+ }
+ return BlendEquationType::InvalidEnum;
+}
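+
+// Worked example (illustrative): GL_MULTIPLY_KHR is 0x9294 and GL_SCREEN_KHR is
+// 0x9295, so for GL_SCREEN_KHR the advanced-blend branch computes
+// scaled = (0x9295 - 0x9294) + 6 = 7, i.e. BlendEquationType::Screen. The
+// Unused/Unused2/Unused3 entries keep the packed values aligned with the gaps
+// in the underlying GLenum ranges.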
+
+constexpr GLenum ToGLenum(BlendEquationType from)
+{
+ if (from <= BlendEquationType::ReverseSubtract)
+ {
+ return static_cast<GLenum>(from) + GL_FUNC_ADD;
+ }
+ if (from <= BlendEquationType::Exclusion)
+ {
+ return static_cast<GLenum>(from) - static_cast<GLenum>(BlendEquationType::Multiply) +
+ GL_MULTIPLY_KHR;
+ }
+ return static_cast<GLenum>(from) - static_cast<GLenum>(BlendEquationType::HslHue) +
+ GL_HSL_HUE_KHR;
+}
+
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Add, GL_FUNC_ADD);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Min, GL_MIN);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Max, GL_MAX);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Subtract, GL_FUNC_SUBTRACT);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, ReverseSubtract, GL_FUNC_REVERSE_SUBTRACT);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Multiply, GL_MULTIPLY_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Screen, GL_SCREEN_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Overlay, GL_OVERLAY_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Darken, GL_DARKEN_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Lighten, GL_LIGHTEN_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Colordodge, GL_COLORDODGE_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Colorburn, GL_COLORBURN_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Hardlight, GL_HARDLIGHT_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Softlight, GL_SOFTLIGHT_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Difference, GL_DIFFERENCE_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, Exclusion, GL_EXCLUSION_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, HslHue, GL_HSL_HUE_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, HslSaturation, GL_HSL_SATURATION_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, HslColor, GL_HSL_COLOR_KHR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendEquationType, HslLuminosity, GL_HSL_LUMINOSITY_KHR);
+
+std::ostream &operator<<(std::ostream &os, BlendEquationType value);
+
+enum class BlendFactorType
+{
+ Zero = 0, // GLenum == 0
+ One = 1, // GLenum == 1
+
+ MinSrcDstType = 2,
+ SrcColor = 2, // GLenum == 0x0300
+ OneMinusSrcColor = 3, // GLenum == 0x0301
+ SrcAlpha = 4, // GLenum == 0x0302
+ OneMinusSrcAlpha = 5, // GLenum == 0x0303
+ DstAlpha = 6, // GLenum == 0x0304
+ OneMinusDstAlpha = 7, // GLenum == 0x0305
+ DstColor = 8, // GLenum == 0x0306
+ OneMinusDstColor = 9, // GLenum == 0x0307
+ SrcAlphaSaturate = 10, // GLenum == 0x0308
+ MaxSrcDstType = 10,
+
+ MinConstantType = 11,
+ ConstantColor = 11, // GLenum == 0x8001
+ OneMinusConstantColor = 12, // GLenum == 0x8002
+ ConstantAlpha = 13, // GLenum == 0x8003
+ OneMinusConstantAlpha = 14, // GLenum == 0x8004
+ MaxConstantType = 14,
+
+ // GL_EXT_blend_func_extended
+
+ Src1Alpha = 15, // GLenum == 0x8589
+
+ Src1Color = 16, // GLenum == 0x88F9
+ OneMinusSrc1Color = 17, // GLenum == 0x88FA
+ OneMinusSrc1Alpha = 18, // GLenum == 0x88FB
+
+ InvalidEnum = 19,
+ EnumCount = 19
+};
+
+template <>
+constexpr BlendFactorType FromGLenum<BlendFactorType>(GLenum from)
+{
+ if (from <= 1)
+ return static_cast<BlendFactorType>(from);
+ if (from >= GL_SRC_COLOR && from <= GL_SRC_ALPHA_SATURATE)
+ return static_cast<BlendFactorType>(from - GL_SRC_COLOR + 2);
+ if (from >= GL_CONSTANT_COLOR && from <= GL_ONE_MINUS_CONSTANT_ALPHA)
+ return static_cast<BlendFactorType>(from - GL_CONSTANT_COLOR + 11);
+ if (from == GL_SRC1_ALPHA_EXT)
+ return BlendFactorType::Src1Alpha;
+ if (from >= GL_SRC1_COLOR_EXT && from <= GL_ONE_MINUS_SRC1_ALPHA_EXT)
+ return static_cast<BlendFactorType>(from - GL_SRC1_COLOR_EXT + 16);
+ return BlendFactorType::InvalidEnum;
+}
+
+constexpr GLenum ToGLenum(BlendFactorType from)
+{
+ const GLenum value = static_cast<GLenum>(from);
+ if (value <= 1)
+ return value;
+ if (from >= BlendFactorType::MinSrcDstType && from <= BlendFactorType::MaxSrcDstType)
+ return value - 2 + GL_SRC_COLOR;
+ if (from >= BlendFactorType::MinConstantType && from <= BlendFactorType::MaxConstantType)
+ return value - 11 + GL_CONSTANT_COLOR;
+ if (from == BlendFactorType::Src1Alpha)
+ return GL_SRC1_ALPHA_EXT;
+ return value - 16 + GL_SRC1_COLOR_EXT;
+}
+
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, Zero, GL_ZERO);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, One, GL_ONE);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, SrcColor, GL_SRC_COLOR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, OneMinusSrcColor, GL_ONE_MINUS_SRC_COLOR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, SrcAlpha, GL_SRC_ALPHA);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, OneMinusSrcAlpha, GL_ONE_MINUS_SRC_ALPHA);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, DstAlpha, GL_DST_ALPHA);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, OneMinusDstAlpha, GL_ONE_MINUS_DST_ALPHA);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, DstColor, GL_DST_COLOR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, OneMinusDstColor, GL_ONE_MINUS_DST_COLOR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, SrcAlphaSaturate, GL_SRC_ALPHA_SATURATE);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, ConstantColor, GL_CONSTANT_COLOR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, OneMinusConstantColor, GL_ONE_MINUS_CONSTANT_COLOR);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, ConstantAlpha, GL_CONSTANT_ALPHA);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, OneMinusConstantAlpha, GL_ONE_MINUS_CONSTANT_ALPHA);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, Src1Alpha, GL_SRC1_ALPHA_EXT);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, Src1Color, GL_SRC1_COLOR_EXT);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, OneMinusSrc1Color, GL_ONE_MINUS_SRC1_COLOR_EXT);
+ANGLE_VALIDATE_PACKED_ENUM(BlendFactorType, OneMinusSrc1Alpha, GL_ONE_MINUS_SRC1_ALPHA_EXT);
+
+std::ostream &operator<<(std::ostream &os, BlendFactorType value);
+
+enum class VertexAttribType
+{
+ Byte = 0, // GLenum == 0x1400
+ UnsignedByte = 1, // GLenum == 0x1401
+ Short = 2, // GLenum == 0x1402
+ UnsignedShort = 3, // GLenum == 0x1403
+ Int = 4, // GLenum == 0x1404
+ UnsignedInt = 5, // GLenum == 0x1405
+ Float = 6, // GLenum == 0x1406
+ Unused1 = 7, // GLenum == 0x1407
+ Unused2 = 8, // GLenum == 0x1408
+ Unused3 = 9, // GLenum == 0x1409
+ Unused4 = 10, // GLenum == 0x140A
+ HalfFloat = 11, // GLenum == 0x140B
+ Fixed = 12, // GLenum == 0x140C
+ MaxBasicType = 12,
+ UnsignedInt2101010 = 13, // GLenum == 0x8368
+ HalfFloatOES = 14, // GLenum == 0x8D61
+ Int2101010 = 15, // GLenum == 0x8D9F
+ UnsignedInt1010102 = 16, // GLenum == 0x8DF6
+ Int1010102 = 17, // GLenum == 0x8DF7
+ InvalidEnum = 18,
+ EnumCount = 18,
+};
+
+template <>
+constexpr VertexAttribType FromGLenum<VertexAttribType>(GLenum from)
+{
+ GLenum packed = from - GL_BYTE;
+ if (packed <= static_cast<GLenum>(VertexAttribType::MaxBasicType))
+ return static_cast<VertexAttribType>(packed);
+ if (from == GL_UNSIGNED_INT_2_10_10_10_REV)
+ return VertexAttribType::UnsignedInt2101010;
+ if (from == GL_HALF_FLOAT_OES)
+ return VertexAttribType::HalfFloatOES;
+ if (from == GL_INT_2_10_10_10_REV)
+ return VertexAttribType::Int2101010;
+ if (from == GL_UNSIGNED_INT_10_10_10_2_OES)
+ return VertexAttribType::UnsignedInt1010102;
+ if (from == GL_INT_10_10_10_2_OES)
+ return VertexAttribType::Int1010102;
+ return VertexAttribType::InvalidEnum;
+}
+
+constexpr GLenum ToGLenum(VertexAttribType from)
+{
+ // This could be optimized using a constexpr table.
+ if (from == VertexAttribType::Int2101010)
+ return GL_INT_2_10_10_10_REV;
+ if (from == VertexAttribType::HalfFloatOES)
+ return GL_HALF_FLOAT_OES;
+ if (from == VertexAttribType::UnsignedInt2101010)
+ return GL_UNSIGNED_INT_2_10_10_10_REV;
+ if (from == VertexAttribType::UnsignedInt1010102)
+ return GL_UNSIGNED_INT_10_10_10_2_OES;
+ if (from == VertexAttribType::Int1010102)
+ return GL_INT_10_10_10_2_OES;
+ return static_cast<GLenum>(from) + GL_BYTE;
+}
+
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Byte, GL_BYTE);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, UnsignedByte, GL_UNSIGNED_BYTE);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Short, GL_SHORT);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, UnsignedShort, GL_UNSIGNED_SHORT);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Int, GL_INT);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, UnsignedInt, GL_UNSIGNED_INT);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Float, GL_FLOAT);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, HalfFloat, GL_HALF_FLOAT);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Fixed, GL_FIXED);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Int2101010, GL_INT_2_10_10_10_REV);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, HalfFloatOES, GL_HALF_FLOAT_OES);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, UnsignedInt2101010, GL_UNSIGNED_INT_2_10_10_10_REV);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Int1010102, GL_INT_10_10_10_2_OES);
+ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, UnsignedInt1010102, GL_UNSIGNED_INT_10_10_10_2_OES);
+
+std::ostream &operator<<(std::ostream &os, VertexAttribType value);
+
+enum class TessEvaluationType
+{
+ Triangles = 0,
+ Quads = 1,
+ Isolines = 2,
+ EqualSpacing = 3,
+ FractionalEvenSpacing = 4,
+ FractionalOddSpacing = 5,
+ Cw = 6,
+ Ccw = 7,
+ PointMode = 8,
+ InvalidEnum = 9,
+ EnumCount = 9
+};
+
+template <>
+constexpr TessEvaluationType FromGLenum<TessEvaluationType>(GLenum from)
+{
+ if (from == GL_TRIANGLES)
+ return TessEvaluationType::Triangles;
+ if (from == GL_QUADS)
+ return TessEvaluationType::Quads;
+ if (from == GL_ISOLINES)
+ return TessEvaluationType::Isolines;
+ if (from == GL_EQUAL)
+ return TessEvaluationType::EqualSpacing;
+ if (from == GL_FRACTIONAL_EVEN)
+ return TessEvaluationType::FractionalEvenSpacing;
+ if (from == GL_FRACTIONAL_ODD)
+ return TessEvaluationType::FractionalOddSpacing;
+ if (from == GL_CW)
+ return TessEvaluationType::Cw;
+ if (from == GL_CCW)
+ return TessEvaluationType::Ccw;
+ if (from == GL_TESS_GEN_POINT_MODE)
+ return TessEvaluationType::PointMode;
+ return TessEvaluationType::InvalidEnum;
+}
+
+constexpr GLenum ToGLenum(TessEvaluationType from)
+{
+ switch (from)
+ {
+ case TessEvaluationType::Triangles:
+ return GL_TRIANGLES;
+ case TessEvaluationType::Quads:
+ return GL_QUADS;
+ case TessEvaluationType::Isolines:
+ return GL_ISOLINES;
+ case TessEvaluationType::EqualSpacing:
+ return GL_EQUAL;
+ case TessEvaluationType::FractionalEvenSpacing:
+ return GL_FRACTIONAL_EVEN;
+ case TessEvaluationType::FractionalOddSpacing:
+ return GL_FRACTIONAL_ODD;
+ case TessEvaluationType::Cw:
+ return GL_CW;
+ case TessEvaluationType::Ccw:
+ return GL_CCW;
+ case TessEvaluationType::PointMode:
+ return GL_TESS_GEN_POINT_MODE;
+ default:
+ return GL_INVALID_ENUM;
+ }
+}
+
+ANGLE_VALIDATE_PACKED_ENUM(TessEvaluationType, Triangles, GL_TRIANGLES);
+ANGLE_VALIDATE_PACKED_ENUM(TessEvaluationType, Quads, GL_QUADS);
+ANGLE_VALIDATE_PACKED_ENUM(TessEvaluationType, Isolines, GL_ISOLINES);
+ANGLE_VALIDATE_PACKED_ENUM(TessEvaluationType, EqualSpacing, GL_EQUAL);
+ANGLE_VALIDATE_PACKED_ENUM(TessEvaluationType, FractionalEvenSpacing, GL_FRACTIONAL_EVEN);
+ANGLE_VALIDATE_PACKED_ENUM(TessEvaluationType, FractionalOddSpacing, GL_FRACTIONAL_ODD);
+ANGLE_VALIDATE_PACKED_ENUM(TessEvaluationType, Cw, GL_CW);
+ANGLE_VALIDATE_PACKED_ENUM(TessEvaluationType, Ccw, GL_CCW);
+ANGLE_VALIDATE_PACKED_ENUM(TessEvaluationType, PointMode, GL_TESS_GEN_POINT_MODE);
+
+std::ostream &operator<<(std::ostream &os, TessEvaluationType value);
+
+// Typesafe object handles.
+
+template <typename T>
+struct ResourceTypeToID;
+
+template <typename T>
+struct IsResourceIDType;
+
+// Clang Format doesn't like the following X macro.
+// clang-format off
+#define ANGLE_ID_TYPES_OP(X) \
+ X(Buffer) \
+ X(FenceNV) \
+ X(Framebuffer) \
+ X(MemoryObject) \
+ X(Path) \
+ X(ProgramPipeline) \
+ X(Query) \
+ X(Renderbuffer) \
+ X(Sampler) \
+ X(Semaphore) \
+ X(Texture) \
+ X(TransformFeedback) \
+ X(VertexArray)
+// clang-format on
+
+#define ANGLE_DEFINE_ID_TYPE(Type) \
+ class Type; \
+ struct Type##ID \
+ { \
+ GLuint value; \
+ }; \
+ template <> \
+ struct ResourceTypeToID<Type> \
+ { \
+ using IDType = Type##ID; \
+ }; \
+ template <> \
+ struct IsResourceIDType<Type##ID> \
+ { \
+ static constexpr bool value = true; \
+ };
+
+ANGLE_ID_TYPES_OP(ANGLE_DEFINE_ID_TYPE)
+
+#undef ANGLE_DEFINE_ID_TYPE
+#undef ANGLE_ID_TYPES_OP
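+
+// For reference, a single entry such as ANGLE_DEFINE_ID_TYPE(Buffer) expands to
+// roughly the following (illustrative sketch):
+//
+//     class Buffer;
+//     struct BufferID { GLuint value; };
+//     template <> struct ResourceTypeToID<Buffer> { using IDType = BufferID; };
+//     template <> struct IsResourceIDType<BufferID> { static constexpr bool value = true; };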
+
+// Shaders and programs are a bit special as they share IDs.
+struct ShaderProgramID
+{
+ GLuint value;
+};
+
+template <>
+struct IsResourceIDType<ShaderProgramID>
+{
+ constexpr static bool value = true;
+};
+
+class Shader;
+template <>
+struct ResourceTypeToID<Shader>
+{
+ using IDType = ShaderProgramID;
+};
+
+class Program;
+template <>
+struct ResourceTypeToID<Program>
+{
+ using IDType = ShaderProgramID;
+};
+
+template <typename T>
+struct ResourceTypeToID
+{
+ using IDType = void;
+};
+
+template <typename T>
+struct IsResourceIDType
+{
+ static constexpr bool value = false;
+};
+
+template <typename T>
+bool ValueEquals(T lhs, T rhs)
+{
+ return lhs.value == rhs.value;
+}
+
+// Utility functions for resource IDs
+template <typename T>
+typename std::enable_if<IsResourceIDType<T>::value, bool>::type operator==(const T &lhs,
+ const T &rhs)
+{
+ return lhs.value == rhs.value;
+}
+
+template <typename T>
+typename std::enable_if<IsResourceIDType<T>::value, bool>::type operator!=(const T &lhs,
+ const T &rhs)
+{
+ return lhs.value != rhs.value;
+}
+
+template <typename T>
+typename std::enable_if<IsResourceIDType<T>::value, bool>::type operator<(const T &lhs,
+ const T &rhs)
+{
+ return lhs.value < rhs.value;
+}
+
+// Used to unbox typed values.
+template <typename ResourceIDType>
+GLuint GetIDValue(ResourceIDType id);
+
+template <>
+inline GLuint GetIDValue(GLuint id)
+{
+ return id;
+}
+
+template <typename ResourceIDType>
+inline GLuint GetIDValue(ResourceIDType id)
+{
+ return id.value;
+}
+
+// First case: handling packed enums.
+template <typename EnumT, typename FromT>
+typename std::enable_if<std::is_enum<EnumT>::value, EnumT>::type PackParam(FromT from)
+{
+ return FromGLenum<EnumT>(from);
+}
+
+// Second case: handling non-pointer resource ids.
+template <typename EnumT, typename FromT>
+typename std::enable_if<!std::is_pointer<FromT>::value && !std::is_enum<EnumT>::value, EnumT>::type
+PackParam(FromT from)
+{
+ return {from};
+}
+
+// Third case: handling pointer resource ids.
+template <typename EnumT, typename FromT>
+typename std::enable_if<std::is_pointer<FromT>::value && !std::is_enum<EnumT>::value, EnumT>::type
+PackParam(FromT from)
+{
+ static_assert(sizeof(typename std::remove_pointer<EnumT>::type) ==
+ sizeof(typename std::remove_pointer<FromT>::type),
+ "Types have different sizes");
+ static_assert(
+ std::is_same<
+ decltype(std::remove_pointer<EnumT>::type::value),
+ typename std::remove_const<typename std::remove_pointer<FromT>::type>::type>::value,
+ "Data types are different");
+ return reinterpret_cast<EnumT>(from);
+}
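+
+// Example usage (illustrative sketch; the variable names are hypothetical):
+//
+//     TextureType type    = PackParam<TextureType>(GL_TEXTURE_2D);    // packed enum case
+//     BufferID bufferID   = PackParam<BufferID>(rawName);             // resource ID case (rawName is a GLuint)
+//     const BufferID *ids = PackParam<const BufferID *>(rawNames);    // pointer case (rawNames is a const GLuint *)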
+
+struct UniformLocation
+{
+ int value;
+};
+
+bool operator<(const UniformLocation &lhs, const UniformLocation &rhs);
+
+struct UniformBlockIndex
+{
+ uint32_t value;
+};
+
+bool IsEmulatedCompressedFormat(GLenum format);
+} // namespace gl
+
+namespace egl
+{
+MessageType ErrorCodeToMessageType(EGLint errorCode);
+} // namespace egl
+
+namespace egl_gl
+{
+gl::TextureTarget EGLCubeMapTargetToCubeMapTarget(EGLenum eglTarget);
+gl::TextureTarget EGLImageTargetToTextureTarget(EGLenum eglTarget);
+gl::TextureType EGLTextureTargetToTextureType(EGLenum eglTarget);
+} // namespace egl_gl
+
+#endif // COMMON_PACKEDGLENUMS_H_
diff --git a/gfx/angle/checkout/src/common/PackedGLEnums_autogen.cpp b/gfx/angle/checkout/src/common/PackedGLEnums_autogen.cpp
new file mode 100644
index 0000000000..d025b11a14
--- /dev/null
+++ b/gfx/angle/checkout/src/common/PackedGLEnums_autogen.cpp
@@ -0,0 +1,2449 @@
+// GENERATED FILE - DO NOT EDIT.
+// Generated by gen_packed_gl_enums.py using data from packed_gl_enums.json.
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// PackedGLEnums_autogen.cpp:
+//   Implements ANGLE-specific enum classes for GLenums and functions operating
+// on them.
+
+#include "common/PackedGLEnums_autogen.h"
+#include "common/debug.h"
+
+namespace gl
+{
+
+template <>
+AlphaTestFunc FromGLenum<AlphaTestFunc>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_ALWAYS:
+ return AlphaTestFunc::AlwaysPass;
+ case GL_EQUAL:
+ return AlphaTestFunc::Equal;
+ case GL_GEQUAL:
+ return AlphaTestFunc::Gequal;
+ case GL_GREATER:
+ return AlphaTestFunc::Greater;
+ case GL_LEQUAL:
+ return AlphaTestFunc::Lequal;
+ case GL_LESS:
+ return AlphaTestFunc::Less;
+ case GL_NEVER:
+ return AlphaTestFunc::Never;
+ case GL_NOTEQUAL:
+ return AlphaTestFunc::NotEqual;
+ default:
+ return AlphaTestFunc::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(AlphaTestFunc from)
+{
+ switch (from)
+ {
+ case AlphaTestFunc::AlwaysPass:
+ return GL_ALWAYS;
+ case AlphaTestFunc::Equal:
+ return GL_EQUAL;
+ case AlphaTestFunc::Gequal:
+ return GL_GEQUAL;
+ case AlphaTestFunc::Greater:
+ return GL_GREATER;
+ case AlphaTestFunc::Lequal:
+ return GL_LEQUAL;
+ case AlphaTestFunc::Less:
+ return GL_LESS;
+ case AlphaTestFunc::Never:
+ return GL_NEVER;
+ case AlphaTestFunc::NotEqual:
+ return GL_NOTEQUAL;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, AlphaTestFunc value)
+{
+ switch (value)
+ {
+ case AlphaTestFunc::AlwaysPass:
+ os << "GL_ALWAYS";
+ break;
+ case AlphaTestFunc::Equal:
+ os << "GL_EQUAL";
+ break;
+ case AlphaTestFunc::Gequal:
+ os << "GL_GEQUAL";
+ break;
+ case AlphaTestFunc::Greater:
+ os << "GL_GREATER";
+ break;
+ case AlphaTestFunc::Lequal:
+ os << "GL_LEQUAL";
+ break;
+ case AlphaTestFunc::Less:
+ os << "GL_LESS";
+ break;
+ case AlphaTestFunc::Never:
+ os << "GL_NEVER";
+ break;
+ case AlphaTestFunc::NotEqual:
+ os << "GL_NOTEQUAL";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+BufferBinding FromGLenum<BufferBinding>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_ARRAY_BUFFER:
+ return BufferBinding::Array;
+ case GL_ATOMIC_COUNTER_BUFFER:
+ return BufferBinding::AtomicCounter;
+ case GL_COPY_READ_BUFFER:
+ return BufferBinding::CopyRead;
+ case GL_COPY_WRITE_BUFFER:
+ return BufferBinding::CopyWrite;
+ case GL_DISPATCH_INDIRECT_BUFFER:
+ return BufferBinding::DispatchIndirect;
+ case GL_DRAW_INDIRECT_BUFFER:
+ return BufferBinding::DrawIndirect;
+ case GL_ELEMENT_ARRAY_BUFFER:
+ return BufferBinding::ElementArray;
+ case GL_PIXEL_PACK_BUFFER:
+ return BufferBinding::PixelPack;
+ case GL_PIXEL_UNPACK_BUFFER:
+ return BufferBinding::PixelUnpack;
+ case GL_SHADER_STORAGE_BUFFER:
+ return BufferBinding::ShaderStorage;
+ case GL_TEXTURE_BUFFER:
+ return BufferBinding::Texture;
+ case GL_TRANSFORM_FEEDBACK_BUFFER:
+ return BufferBinding::TransformFeedback;
+ case GL_UNIFORM_BUFFER:
+ return BufferBinding::Uniform;
+ default:
+ return BufferBinding::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(BufferBinding from)
+{
+ switch (from)
+ {
+ case BufferBinding::Array:
+ return GL_ARRAY_BUFFER;
+ case BufferBinding::AtomicCounter:
+ return GL_ATOMIC_COUNTER_BUFFER;
+ case BufferBinding::CopyRead:
+ return GL_COPY_READ_BUFFER;
+ case BufferBinding::CopyWrite:
+ return GL_COPY_WRITE_BUFFER;
+ case BufferBinding::DispatchIndirect:
+ return GL_DISPATCH_INDIRECT_BUFFER;
+ case BufferBinding::DrawIndirect:
+ return GL_DRAW_INDIRECT_BUFFER;
+ case BufferBinding::ElementArray:
+ return GL_ELEMENT_ARRAY_BUFFER;
+ case BufferBinding::PixelPack:
+ return GL_PIXEL_PACK_BUFFER;
+ case BufferBinding::PixelUnpack:
+ return GL_PIXEL_UNPACK_BUFFER;
+ case BufferBinding::ShaderStorage:
+ return GL_SHADER_STORAGE_BUFFER;
+ case BufferBinding::Texture:
+ return GL_TEXTURE_BUFFER;
+ case BufferBinding::TransformFeedback:
+ return GL_TRANSFORM_FEEDBACK_BUFFER;
+ case BufferBinding::Uniform:
+ return GL_UNIFORM_BUFFER;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, BufferBinding value)
+{
+ switch (value)
+ {
+ case BufferBinding::Array:
+ os << "GL_ARRAY_BUFFER";
+ break;
+ case BufferBinding::AtomicCounter:
+ os << "GL_ATOMIC_COUNTER_BUFFER";
+ break;
+ case BufferBinding::CopyRead:
+ os << "GL_COPY_READ_BUFFER";
+ break;
+ case BufferBinding::CopyWrite:
+ os << "GL_COPY_WRITE_BUFFER";
+ break;
+ case BufferBinding::DispatchIndirect:
+ os << "GL_DISPATCH_INDIRECT_BUFFER";
+ break;
+ case BufferBinding::DrawIndirect:
+ os << "GL_DRAW_INDIRECT_BUFFER";
+ break;
+ case BufferBinding::ElementArray:
+ os << "GL_ELEMENT_ARRAY_BUFFER";
+ break;
+ case BufferBinding::PixelPack:
+ os << "GL_PIXEL_PACK_BUFFER";
+ break;
+ case BufferBinding::PixelUnpack:
+ os << "GL_PIXEL_UNPACK_BUFFER";
+ break;
+ case BufferBinding::ShaderStorage:
+ os << "GL_SHADER_STORAGE_BUFFER";
+ break;
+ case BufferBinding::Texture:
+ os << "GL_TEXTURE_BUFFER";
+ break;
+ case BufferBinding::TransformFeedback:
+ os << "GL_TRANSFORM_FEEDBACK_BUFFER";
+ break;
+ case BufferBinding::Uniform:
+ os << "GL_UNIFORM_BUFFER";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+BufferUsage FromGLenum<BufferUsage>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_DYNAMIC_COPY:
+ return BufferUsage::DynamicCopy;
+ case GL_DYNAMIC_DRAW:
+ return BufferUsage::DynamicDraw;
+ case GL_DYNAMIC_READ:
+ return BufferUsage::DynamicRead;
+ case GL_STATIC_COPY:
+ return BufferUsage::StaticCopy;
+ case GL_STATIC_DRAW:
+ return BufferUsage::StaticDraw;
+ case GL_STATIC_READ:
+ return BufferUsage::StaticRead;
+ case GL_STREAM_COPY:
+ return BufferUsage::StreamCopy;
+ case GL_STREAM_DRAW:
+ return BufferUsage::StreamDraw;
+ case GL_STREAM_READ:
+ return BufferUsage::StreamRead;
+ default:
+ return BufferUsage::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(BufferUsage from)
+{
+ switch (from)
+ {
+ case BufferUsage::DynamicCopy:
+ return GL_DYNAMIC_COPY;
+ case BufferUsage::DynamicDraw:
+ return GL_DYNAMIC_DRAW;
+ case BufferUsage::DynamicRead:
+ return GL_DYNAMIC_READ;
+ case BufferUsage::StaticCopy:
+ return GL_STATIC_COPY;
+ case BufferUsage::StaticDraw:
+ return GL_STATIC_DRAW;
+ case BufferUsage::StaticRead:
+ return GL_STATIC_READ;
+ case BufferUsage::StreamCopy:
+ return GL_STREAM_COPY;
+ case BufferUsage::StreamDraw:
+ return GL_STREAM_DRAW;
+ case BufferUsage::StreamRead:
+ return GL_STREAM_READ;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, BufferUsage value)
+{
+ switch (value)
+ {
+ case BufferUsage::DynamicCopy:
+ os << "GL_DYNAMIC_COPY";
+ break;
+ case BufferUsage::DynamicDraw:
+ os << "GL_DYNAMIC_DRAW";
+ break;
+ case BufferUsage::DynamicRead:
+ os << "GL_DYNAMIC_READ";
+ break;
+ case BufferUsage::StaticCopy:
+ os << "GL_STATIC_COPY";
+ break;
+ case BufferUsage::StaticDraw:
+ os << "GL_STATIC_DRAW";
+ break;
+ case BufferUsage::StaticRead:
+ os << "GL_STATIC_READ";
+ break;
+ case BufferUsage::StreamCopy:
+ os << "GL_STREAM_COPY";
+ break;
+ case BufferUsage::StreamDraw:
+ os << "GL_STREAM_DRAW";
+ break;
+ case BufferUsage::StreamRead:
+ os << "GL_STREAM_READ";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+ClientVertexArrayType FromGLenum<ClientVertexArrayType>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_COLOR_ARRAY:
+ return ClientVertexArrayType::Color;
+ case GL_NORMAL_ARRAY:
+ return ClientVertexArrayType::Normal;
+ case GL_POINT_SIZE_ARRAY_OES:
+ return ClientVertexArrayType::PointSize;
+ case GL_TEXTURE_COORD_ARRAY:
+ return ClientVertexArrayType::TextureCoord;
+ case GL_VERTEX_ARRAY:
+ return ClientVertexArrayType::Vertex;
+ default:
+ return ClientVertexArrayType::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(ClientVertexArrayType from)
+{
+ switch (from)
+ {
+ case ClientVertexArrayType::Color:
+ return GL_COLOR_ARRAY;
+ case ClientVertexArrayType::Normal:
+ return GL_NORMAL_ARRAY;
+ case ClientVertexArrayType::PointSize:
+ return GL_POINT_SIZE_ARRAY_OES;
+ case ClientVertexArrayType::TextureCoord:
+ return GL_TEXTURE_COORD_ARRAY;
+ case ClientVertexArrayType::Vertex:
+ return GL_VERTEX_ARRAY;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, ClientVertexArrayType value)
+{
+ switch (value)
+ {
+ case ClientVertexArrayType::Color:
+ os << "GL_COLOR_ARRAY";
+ break;
+ case ClientVertexArrayType::Normal:
+ os << "GL_NORMAL_ARRAY";
+ break;
+ case ClientVertexArrayType::PointSize:
+ os << "GL_POINT_SIZE_ARRAY_OES";
+ break;
+ case ClientVertexArrayType::TextureCoord:
+ os << "GL_TEXTURE_COORD_ARRAY";
+ break;
+ case ClientVertexArrayType::Vertex:
+ os << "GL_VERTEX_ARRAY";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+CullFaceMode FromGLenum<CullFaceMode>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_BACK:
+ return CullFaceMode::Back;
+ case GL_FRONT:
+ return CullFaceMode::Front;
+ case GL_FRONT_AND_BACK:
+ return CullFaceMode::FrontAndBack;
+ default:
+ return CullFaceMode::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(CullFaceMode from)
+{
+ switch (from)
+ {
+ case CullFaceMode::Back:
+ return GL_BACK;
+ case CullFaceMode::Front:
+ return GL_FRONT;
+ case CullFaceMode::FrontAndBack:
+ return GL_FRONT_AND_BACK;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, CullFaceMode value)
+{
+ switch (value)
+ {
+ case CullFaceMode::Back:
+ os << "GL_BACK";
+ break;
+ case CullFaceMode::Front:
+ os << "GL_FRONT";
+ break;
+ case CullFaceMode::FrontAndBack:
+ os << "GL_FRONT_AND_BACK";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+FilterMode FromGLenum<FilterMode>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_NEAREST:
+ return FilterMode::Nearest;
+ case GL_LINEAR:
+ return FilterMode::Linear;
+ case GL_NEAREST_MIPMAP_NEAREST:
+ return FilterMode::NearestMipmapNearest;
+ case GL_NEAREST_MIPMAP_LINEAR:
+ return FilterMode::NearestMipmapLinear;
+ case GL_LINEAR_MIPMAP_LINEAR:
+ return FilterMode::LinearMipmapLinear;
+ default:
+ return FilterMode::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(FilterMode from)
+{
+ switch (from)
+ {
+ case FilterMode::Nearest:
+ return GL_NEAREST;
+ case FilterMode::Linear:
+ return GL_LINEAR;
+ case FilterMode::NearestMipmapNearest:
+ return GL_NEAREST_MIPMAP_NEAREST;
+ case FilterMode::NearestMipmapLinear:
+ return GL_NEAREST_MIPMAP_LINEAR;
+ case FilterMode::LinearMipmapLinear:
+ return GL_LINEAR_MIPMAP_LINEAR;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, FilterMode value)
+{
+ switch (value)
+ {
+ case FilterMode::Nearest:
+ os << "GL_NEAREST";
+ break;
+ case FilterMode::Linear:
+ os << "GL_LINEAR";
+ break;
+ case FilterMode::NearestMipmapNearest:
+ os << "GL_NEAREST_MIPMAP_NEAREST";
+ break;
+ case FilterMode::NearestMipmapLinear:
+ os << "GL_NEAREST_MIPMAP_LINEAR";
+ break;
+ case FilterMode::LinearMipmapLinear:
+ os << "GL_LINEAR_MIPMAP_LINEAR";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+FogMode FromGLenum<FogMode>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_EXP:
+ return FogMode::Exp;
+ case GL_EXP2:
+ return FogMode::Exp2;
+ case GL_LINEAR:
+ return FogMode::Linear;
+ default:
+ return FogMode::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(FogMode from)
+{
+ switch (from)
+ {
+ case FogMode::Exp:
+ return GL_EXP;
+ case FogMode::Exp2:
+ return GL_EXP2;
+ case FogMode::Linear:
+ return GL_LINEAR;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, FogMode value)
+{
+ switch (value)
+ {
+ case FogMode::Exp:
+ os << "GL_EXP";
+ break;
+ case FogMode::Exp2:
+ os << "GL_EXP2";
+ break;
+ case FogMode::Linear:
+ os << "GL_LINEAR";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+GraphicsResetStatus FromGLenum<GraphicsResetStatus>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_NO_ERROR:
+ return GraphicsResetStatus::NoError;
+ case GL_GUILTY_CONTEXT_RESET:
+ return GraphicsResetStatus::GuiltyContextReset;
+ case GL_INNOCENT_CONTEXT_RESET:
+ return GraphicsResetStatus::InnocentContextReset;
+ case GL_UNKNOWN_CONTEXT_RESET:
+ return GraphicsResetStatus::UnknownContextReset;
+ case GL_PURGED_CONTEXT_RESET_NV:
+ return GraphicsResetStatus::PurgedContextResetNV;
+ default:
+ return GraphicsResetStatus::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(GraphicsResetStatus from)
+{
+ switch (from)
+ {
+ case GraphicsResetStatus::NoError:
+ return GL_NO_ERROR;
+ case GraphicsResetStatus::GuiltyContextReset:
+ return GL_GUILTY_CONTEXT_RESET;
+ case GraphicsResetStatus::InnocentContextReset:
+ return GL_INNOCENT_CONTEXT_RESET;
+ case GraphicsResetStatus::UnknownContextReset:
+ return GL_UNKNOWN_CONTEXT_RESET;
+ case GraphicsResetStatus::PurgedContextResetNV:
+ return GL_PURGED_CONTEXT_RESET_NV;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, GraphicsResetStatus value)
+{
+ switch (value)
+ {
+ case GraphicsResetStatus::NoError:
+ os << "GL_NO_ERROR";
+ break;
+ case GraphicsResetStatus::GuiltyContextReset:
+ os << "GL_GUILTY_CONTEXT_RESET";
+ break;
+ case GraphicsResetStatus::InnocentContextReset:
+ os << "GL_INNOCENT_CONTEXT_RESET";
+ break;
+ case GraphicsResetStatus::UnknownContextReset:
+ os << "GL_UNKNOWN_CONTEXT_RESET";
+ break;
+ case GraphicsResetStatus::PurgedContextResetNV:
+ os << "GL_PURGED_CONTEXT_RESET_NV";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+HandleType FromGLenum<HandleType>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_HANDLE_TYPE_OPAQUE_FD_EXT:
+ return HandleType::OpaqueFd;
+ case GL_HANDLE_TYPE_ZIRCON_VMO_ANGLE:
+ return HandleType::ZirconVmo;
+ case GL_HANDLE_TYPE_ZIRCON_EVENT_ANGLE:
+ return HandleType::ZirconEvent;
+ default:
+ return HandleType::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(HandleType from)
+{
+ switch (from)
+ {
+ case HandleType::OpaqueFd:
+ return GL_HANDLE_TYPE_OPAQUE_FD_EXT;
+ case HandleType::ZirconVmo:
+ return GL_HANDLE_TYPE_ZIRCON_VMO_ANGLE;
+ case HandleType::ZirconEvent:
+ return GL_HANDLE_TYPE_ZIRCON_EVENT_ANGLE;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, HandleType value)
+{
+ switch (value)
+ {
+ case HandleType::OpaqueFd:
+ os << "GL_HANDLE_TYPE_OPAQUE_FD_EXT";
+ break;
+ case HandleType::ZirconVmo:
+ os << "GL_HANDLE_TYPE_ZIRCON_VMO_ANGLE";
+ break;
+ case HandleType::ZirconEvent:
+ os << "GL_HANDLE_TYPE_ZIRCON_EVENT_ANGLE";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+HintSetting FromGLenum<HintSetting>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_DONT_CARE:
+ return HintSetting::DontCare;
+ case GL_FASTEST:
+ return HintSetting::Fastest;
+ case GL_NICEST:
+ return HintSetting::Nicest;
+ default:
+ return HintSetting::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(HintSetting from)
+{
+ switch (from)
+ {
+ case HintSetting::DontCare:
+ return GL_DONT_CARE;
+ case HintSetting::Fastest:
+ return GL_FASTEST;
+ case HintSetting::Nicest:
+ return GL_NICEST;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, HintSetting value)
+{
+ switch (value)
+ {
+ case HintSetting::DontCare:
+ os << "GL_DONT_CARE";
+ break;
+ case HintSetting::Fastest:
+ os << "GL_FASTEST";
+ break;
+ case HintSetting::Nicest:
+ os << "GL_NICEST";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+ImageLayout FromGLenum<ImageLayout>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_NONE:
+ return ImageLayout::Undefined;
+ case GL_LAYOUT_GENERAL_EXT:
+ return ImageLayout::General;
+ case GL_LAYOUT_COLOR_ATTACHMENT_EXT:
+ return ImageLayout::ColorAttachment;
+ case GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT:
+ return ImageLayout::DepthStencilAttachment;
+ case GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT:
+ return ImageLayout::DepthStencilReadOnlyAttachment;
+ case GL_LAYOUT_SHADER_READ_ONLY_EXT:
+ return ImageLayout::ShaderReadOnly;
+ case GL_LAYOUT_TRANSFER_SRC_EXT:
+ return ImageLayout::TransferSrc;
+ case GL_LAYOUT_TRANSFER_DST_EXT:
+ return ImageLayout::TransferDst;
+ case GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT:
+ return ImageLayout::DepthReadOnlyStencilAttachment;
+ case GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT:
+ return ImageLayout::DepthAttachmentStencilReadOnly;
+ default:
+ return ImageLayout::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(ImageLayout from)
+{
+ switch (from)
+ {
+ case ImageLayout::Undefined:
+ return GL_NONE;
+ case ImageLayout::General:
+ return GL_LAYOUT_GENERAL_EXT;
+ case ImageLayout::ColorAttachment:
+ return GL_LAYOUT_COLOR_ATTACHMENT_EXT;
+ case ImageLayout::DepthStencilAttachment:
+ return GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT;
+ case ImageLayout::DepthStencilReadOnlyAttachment:
+ return GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT;
+ case ImageLayout::ShaderReadOnly:
+ return GL_LAYOUT_SHADER_READ_ONLY_EXT;
+ case ImageLayout::TransferSrc:
+ return GL_LAYOUT_TRANSFER_SRC_EXT;
+ case ImageLayout::TransferDst:
+ return GL_LAYOUT_TRANSFER_DST_EXT;
+ case ImageLayout::DepthReadOnlyStencilAttachment:
+ return GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT;
+ case ImageLayout::DepthAttachmentStencilReadOnly:
+ return GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, ImageLayout value)
+{
+ switch (value)
+ {
+ case ImageLayout::Undefined:
+ os << "GL_NONE";
+ break;
+ case ImageLayout::General:
+ os << "GL_LAYOUT_GENERAL_EXT";
+ break;
+ case ImageLayout::ColorAttachment:
+ os << "GL_LAYOUT_COLOR_ATTACHMENT_EXT";
+ break;
+ case ImageLayout::DepthStencilAttachment:
+ os << "GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT";
+ break;
+ case ImageLayout::DepthStencilReadOnlyAttachment:
+ os << "GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT";
+ break;
+ case ImageLayout::ShaderReadOnly:
+ os << "GL_LAYOUT_SHADER_READ_ONLY_EXT";
+ break;
+ case ImageLayout::TransferSrc:
+ os << "GL_LAYOUT_TRANSFER_SRC_EXT";
+ break;
+ case ImageLayout::TransferDst:
+ os << "GL_LAYOUT_TRANSFER_DST_EXT";
+ break;
+ case ImageLayout::DepthReadOnlyStencilAttachment:
+ os << "GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT";
+ break;
+ case ImageLayout::DepthAttachmentStencilReadOnly:
+ os << "GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+LightParameter FromGLenum<LightParameter>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_AMBIENT:
+ return LightParameter::Ambient;
+ case GL_AMBIENT_AND_DIFFUSE:
+ return LightParameter::AmbientAndDiffuse;
+ case GL_CONSTANT_ATTENUATION:
+ return LightParameter::ConstantAttenuation;
+ case GL_DIFFUSE:
+ return LightParameter::Diffuse;
+ case GL_LINEAR_ATTENUATION:
+ return LightParameter::LinearAttenuation;
+ case GL_POSITION:
+ return LightParameter::Position;
+ case GL_QUADRATIC_ATTENUATION:
+ return LightParameter::QuadraticAttenuation;
+ case GL_SPECULAR:
+ return LightParameter::Specular;
+ case GL_SPOT_CUTOFF:
+ return LightParameter::SpotCutoff;
+ case GL_SPOT_DIRECTION:
+ return LightParameter::SpotDirection;
+ case GL_SPOT_EXPONENT:
+ return LightParameter::SpotExponent;
+ default:
+ return LightParameter::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(LightParameter from)
+{
+ switch (from)
+ {
+ case LightParameter::Ambient:
+ return GL_AMBIENT;
+ case LightParameter::AmbientAndDiffuse:
+ return GL_AMBIENT_AND_DIFFUSE;
+ case LightParameter::ConstantAttenuation:
+ return GL_CONSTANT_ATTENUATION;
+ case LightParameter::Diffuse:
+ return GL_DIFFUSE;
+ case LightParameter::LinearAttenuation:
+ return GL_LINEAR_ATTENUATION;
+ case LightParameter::Position:
+ return GL_POSITION;
+ case LightParameter::QuadraticAttenuation:
+ return GL_QUADRATIC_ATTENUATION;
+ case LightParameter::Specular:
+ return GL_SPECULAR;
+ case LightParameter::SpotCutoff:
+ return GL_SPOT_CUTOFF;
+ case LightParameter::SpotDirection:
+ return GL_SPOT_DIRECTION;
+ case LightParameter::SpotExponent:
+ return GL_SPOT_EXPONENT;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, LightParameter value)
+{
+ switch (value)
+ {
+ case LightParameter::Ambient:
+ os << "GL_AMBIENT";
+ break;
+ case LightParameter::AmbientAndDiffuse:
+ os << "GL_AMBIENT_AND_DIFFUSE";
+ break;
+ case LightParameter::ConstantAttenuation:
+ os << "GL_CONSTANT_ATTENUATION";
+ break;
+ case LightParameter::Diffuse:
+ os << "GL_DIFFUSE";
+ break;
+ case LightParameter::LinearAttenuation:
+ os << "GL_LINEAR_ATTENUATION";
+ break;
+ case LightParameter::Position:
+ os << "GL_POSITION";
+ break;
+ case LightParameter::QuadraticAttenuation:
+ os << "GL_QUADRATIC_ATTENUATION";
+ break;
+ case LightParameter::Specular:
+ os << "GL_SPECULAR";
+ break;
+ case LightParameter::SpotCutoff:
+ os << "GL_SPOT_CUTOFF";
+ break;
+ case LightParameter::SpotDirection:
+ os << "GL_SPOT_DIRECTION";
+ break;
+ case LightParameter::SpotExponent:
+ os << "GL_SPOT_EXPONENT";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+LogicalOperation FromGLenum<LogicalOperation>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_AND:
+ return LogicalOperation::And;
+ case GL_AND_INVERTED:
+ return LogicalOperation::AndInverted;
+ case GL_AND_REVERSE:
+ return LogicalOperation::AndReverse;
+ case GL_CLEAR:
+ return LogicalOperation::Clear;
+ case GL_COPY:
+ return LogicalOperation::Copy;
+ case GL_COPY_INVERTED:
+ return LogicalOperation::CopyInverted;
+ case GL_EQUIV:
+ return LogicalOperation::Equiv;
+ case GL_INVERT:
+ return LogicalOperation::Invert;
+ case GL_NAND:
+ return LogicalOperation::Nand;
+ case GL_NOOP:
+ return LogicalOperation::Noop;
+ case GL_NOR:
+ return LogicalOperation::Nor;
+ case GL_OR:
+ return LogicalOperation::Or;
+ case GL_OR_INVERTED:
+ return LogicalOperation::OrInverted;
+ case GL_OR_REVERSE:
+ return LogicalOperation::OrReverse;
+ case GL_SET:
+ return LogicalOperation::Set;
+ case GL_XOR:
+ return LogicalOperation::Xor;
+ default:
+ return LogicalOperation::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(LogicalOperation from)
+{
+ switch (from)
+ {
+ case LogicalOperation::And:
+ return GL_AND;
+ case LogicalOperation::AndInverted:
+ return GL_AND_INVERTED;
+ case LogicalOperation::AndReverse:
+ return GL_AND_REVERSE;
+ case LogicalOperation::Clear:
+ return GL_CLEAR;
+ case LogicalOperation::Copy:
+ return GL_COPY;
+ case LogicalOperation::CopyInverted:
+ return GL_COPY_INVERTED;
+ case LogicalOperation::Equiv:
+ return GL_EQUIV;
+ case LogicalOperation::Invert:
+ return GL_INVERT;
+ case LogicalOperation::Nand:
+ return GL_NAND;
+ case LogicalOperation::Noop:
+ return GL_NOOP;
+ case LogicalOperation::Nor:
+ return GL_NOR;
+ case LogicalOperation::Or:
+ return GL_OR;
+ case LogicalOperation::OrInverted:
+ return GL_OR_INVERTED;
+ case LogicalOperation::OrReverse:
+ return GL_OR_REVERSE;
+ case LogicalOperation::Set:
+ return GL_SET;
+ case LogicalOperation::Xor:
+ return GL_XOR;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, LogicalOperation value)
+{
+ switch (value)
+ {
+ case LogicalOperation::And:
+ os << "GL_AND";
+ break;
+ case LogicalOperation::AndInverted:
+ os << "GL_AND_INVERTED";
+ break;
+ case LogicalOperation::AndReverse:
+ os << "GL_AND_REVERSE";
+ break;
+ case LogicalOperation::Clear:
+ os << "GL_CLEAR";
+ break;
+ case LogicalOperation::Copy:
+ os << "GL_COPY";
+ break;
+ case LogicalOperation::CopyInverted:
+ os << "GL_COPY_INVERTED";
+ break;
+ case LogicalOperation::Equiv:
+ os << "GL_EQUIV";
+ break;
+ case LogicalOperation::Invert:
+ os << "GL_INVERT";
+ break;
+ case LogicalOperation::Nand:
+ os << "GL_NAND";
+ break;
+ case LogicalOperation::Noop:
+ os << "GL_NOOP";
+ break;
+ case LogicalOperation::Nor:
+ os << "GL_NOR";
+ break;
+ case LogicalOperation::Or:
+ os << "GL_OR";
+ break;
+ case LogicalOperation::OrInverted:
+ os << "GL_OR_INVERTED";
+ break;
+ case LogicalOperation::OrReverse:
+ os << "GL_OR_REVERSE";
+ break;
+ case LogicalOperation::Set:
+ os << "GL_SET";
+ break;
+ case LogicalOperation::Xor:
+ os << "GL_XOR";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+MaterialParameter FromGLenum<MaterialParameter>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_AMBIENT:
+ return MaterialParameter::Ambient;
+ case GL_AMBIENT_AND_DIFFUSE:
+ return MaterialParameter::AmbientAndDiffuse;
+ case GL_DIFFUSE:
+ return MaterialParameter::Diffuse;
+ case GL_EMISSION:
+ return MaterialParameter::Emission;
+ case GL_SHININESS:
+ return MaterialParameter::Shininess;
+ case GL_SPECULAR:
+ return MaterialParameter::Specular;
+ default:
+ return MaterialParameter::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(MaterialParameter from)
+{
+ switch (from)
+ {
+ case MaterialParameter::Ambient:
+ return GL_AMBIENT;
+ case MaterialParameter::AmbientAndDiffuse:
+ return GL_AMBIENT_AND_DIFFUSE;
+ case MaterialParameter::Diffuse:
+ return GL_DIFFUSE;
+ case MaterialParameter::Emission:
+ return GL_EMISSION;
+ case MaterialParameter::Shininess:
+ return GL_SHININESS;
+ case MaterialParameter::Specular:
+ return GL_SPECULAR;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, MaterialParameter value)
+{
+ switch (value)
+ {
+ case MaterialParameter::Ambient:
+ os << "GL_AMBIENT";
+ break;
+ case MaterialParameter::AmbientAndDiffuse:
+ os << "GL_AMBIENT_AND_DIFFUSE";
+ break;
+ case MaterialParameter::Diffuse:
+ os << "GL_DIFFUSE";
+ break;
+ case MaterialParameter::Emission:
+ os << "GL_EMISSION";
+ break;
+ case MaterialParameter::Shininess:
+ os << "GL_SHININESS";
+ break;
+ case MaterialParameter::Specular:
+ os << "GL_SPECULAR";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+MatrixType FromGLenum<MatrixType>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_MODELVIEW:
+ return MatrixType::Modelview;
+ case GL_PROJECTION:
+ return MatrixType::Projection;
+ case GL_TEXTURE:
+ return MatrixType::Texture;
+ default:
+ return MatrixType::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(MatrixType from)
+{
+ switch (from)
+ {
+ case MatrixType::Modelview:
+ return GL_MODELVIEW;
+ case MatrixType::Projection:
+ return GL_PROJECTION;
+ case MatrixType::Texture:
+ return GL_TEXTURE;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, MatrixType value)
+{
+ switch (value)
+ {
+ case MatrixType::Modelview:
+ os << "GL_MODELVIEW";
+ break;
+ case MatrixType::Projection:
+ os << "GL_PROJECTION";
+ break;
+ case MatrixType::Texture:
+ os << "GL_TEXTURE";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+PointParameter FromGLenum<PointParameter>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_POINT_SIZE_MIN:
+ return PointParameter::PointSizeMin;
+ case GL_POINT_SIZE_MAX:
+ return PointParameter::PointSizeMax;
+ case GL_POINT_FADE_THRESHOLD_SIZE:
+ return PointParameter::PointFadeThresholdSize;
+ case GL_POINT_DISTANCE_ATTENUATION:
+ return PointParameter::PointDistanceAttenuation;
+ default:
+ return PointParameter::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(PointParameter from)
+{
+ switch (from)
+ {
+ case PointParameter::PointSizeMin:
+ return GL_POINT_SIZE_MIN;
+ case PointParameter::PointSizeMax:
+ return GL_POINT_SIZE_MAX;
+ case PointParameter::PointFadeThresholdSize:
+ return GL_POINT_FADE_THRESHOLD_SIZE;
+ case PointParameter::PointDistanceAttenuation:
+ return GL_POINT_DISTANCE_ATTENUATION;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, PointParameter value)
+{
+ switch (value)
+ {
+ case PointParameter::PointSizeMin:
+ os << "GL_POINT_SIZE_MIN";
+ break;
+ case PointParameter::PointSizeMax:
+ os << "GL_POINT_SIZE_MAX";
+ break;
+ case PointParameter::PointFadeThresholdSize:
+ os << "GL_POINT_FADE_THRESHOLD_SIZE";
+ break;
+ case PointParameter::PointDistanceAttenuation:
+ os << "GL_POINT_DISTANCE_ATTENUATION";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+ProvokingVertexConvention FromGLenum<ProvokingVertexConvention>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_FIRST_VERTEX_CONVENTION:
+ return ProvokingVertexConvention::FirstVertexConvention;
+ case GL_LAST_VERTEX_CONVENTION:
+ return ProvokingVertexConvention::LastVertexConvention;
+ default:
+ return ProvokingVertexConvention::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(ProvokingVertexConvention from)
+{
+ switch (from)
+ {
+ case ProvokingVertexConvention::FirstVertexConvention:
+ return GL_FIRST_VERTEX_CONVENTION;
+ case ProvokingVertexConvention::LastVertexConvention:
+ return GL_LAST_VERTEX_CONVENTION;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, ProvokingVertexConvention value)
+{
+ switch (value)
+ {
+ case ProvokingVertexConvention::FirstVertexConvention:
+ os << "GL_FIRST_VERTEX_CONVENTION";
+ break;
+ case ProvokingVertexConvention::LastVertexConvention:
+ os << "GL_LAST_VERTEX_CONVENTION";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+QueryType FromGLenum<QueryType>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_ANY_SAMPLES_PASSED:
+ return QueryType::AnySamples;
+ case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
+ return QueryType::AnySamplesConservative;
+ case GL_COMMANDS_COMPLETED_CHROMIUM:
+ return QueryType::CommandsCompleted;
+ case GL_PRIMITIVES_GENERATED_EXT:
+ return QueryType::PrimitivesGenerated;
+ case GL_TIME_ELAPSED_EXT:
+ return QueryType::TimeElapsed;
+ case GL_TIMESTAMP_EXT:
+ return QueryType::Timestamp;
+ case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
+ return QueryType::TransformFeedbackPrimitivesWritten;
+ default:
+ return QueryType::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(QueryType from)
+{
+ switch (from)
+ {
+ case QueryType::AnySamples:
+ return GL_ANY_SAMPLES_PASSED;
+ case QueryType::AnySamplesConservative:
+ return GL_ANY_SAMPLES_PASSED_CONSERVATIVE;
+ case QueryType::CommandsCompleted:
+ return GL_COMMANDS_COMPLETED_CHROMIUM;
+ case QueryType::PrimitivesGenerated:
+ return GL_PRIMITIVES_GENERATED_EXT;
+ case QueryType::TimeElapsed:
+ return GL_TIME_ELAPSED_EXT;
+ case QueryType::Timestamp:
+ return GL_TIMESTAMP_EXT;
+ case QueryType::TransformFeedbackPrimitivesWritten:
+ return GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, QueryType value)
+{
+ switch (value)
+ {
+ case QueryType::AnySamples:
+ os << "GL_ANY_SAMPLES_PASSED";
+ break;
+ case QueryType::AnySamplesConservative:
+ os << "GL_ANY_SAMPLES_PASSED_CONSERVATIVE";
+ break;
+ case QueryType::CommandsCompleted:
+ os << "GL_COMMANDS_COMPLETED_CHROMIUM";
+ break;
+ case QueryType::PrimitivesGenerated:
+ os << "GL_PRIMITIVES_GENERATED_EXT";
+ break;
+ case QueryType::TimeElapsed:
+ os << "GL_TIME_ELAPSED_EXT";
+ break;
+ case QueryType::Timestamp:
+ os << "GL_TIMESTAMP_EXT";
+ break;
+ case QueryType::TransformFeedbackPrimitivesWritten:
+ os << "GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+ShaderType FromGLenum<ShaderType>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_VERTEX_SHADER:
+ return ShaderType::Vertex;
+ case GL_TESS_CONTROL_SHADER_EXT:
+ return ShaderType::TessControl;
+ case GL_TESS_EVALUATION_SHADER_EXT:
+ return ShaderType::TessEvaluation;
+ case GL_GEOMETRY_SHADER_EXT:
+ return ShaderType::Geometry;
+ case GL_FRAGMENT_SHADER:
+ return ShaderType::Fragment;
+ case GL_COMPUTE_SHADER:
+ return ShaderType::Compute;
+ default:
+ return ShaderType::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(ShaderType from)
+{
+ switch (from)
+ {
+ case ShaderType::Vertex:
+ return GL_VERTEX_SHADER;
+ case ShaderType::TessControl:
+ return GL_TESS_CONTROL_SHADER_EXT;
+ case ShaderType::TessEvaluation:
+ return GL_TESS_EVALUATION_SHADER_EXT;
+ case ShaderType::Geometry:
+ return GL_GEOMETRY_SHADER_EXT;
+ case ShaderType::Fragment:
+ return GL_FRAGMENT_SHADER;
+ case ShaderType::Compute:
+ return GL_COMPUTE_SHADER;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, ShaderType value)
+{
+ switch (value)
+ {
+ case ShaderType::Vertex:
+ os << "GL_VERTEX_SHADER";
+ break;
+ case ShaderType::TessControl:
+ os << "GL_TESS_CONTROL_SHADER_EXT";
+ break;
+ case ShaderType::TessEvaluation:
+ os << "GL_TESS_EVALUATION_SHADER_EXT";
+ break;
+ case ShaderType::Geometry:
+ os << "GL_GEOMETRY_SHADER_EXT";
+ break;
+ case ShaderType::Fragment:
+ os << "GL_FRAGMENT_SHADER";
+ break;
+ case ShaderType::Compute:
+ os << "GL_COMPUTE_SHADER";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+ShadingModel FromGLenum<ShadingModel>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_FLAT:
+ return ShadingModel::Flat;
+ case GL_SMOOTH:
+ return ShadingModel::Smooth;
+ default:
+ return ShadingModel::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(ShadingModel from)
+{
+ switch (from)
+ {
+ case ShadingModel::Flat:
+ return GL_FLAT;
+ case ShadingModel::Smooth:
+ return GL_SMOOTH;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, ShadingModel value)
+{
+ switch (value)
+ {
+ case ShadingModel::Flat:
+ os << "GL_FLAT";
+ break;
+ case ShadingModel::Smooth:
+ os << "GL_SMOOTH";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+ShadingRate FromGLenum<ShadingRate>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_NONE:
+ return ShadingRate::Undefined;
+ case GL_SHADING_RATE_1X1_PIXELS_QCOM:
+ return ShadingRate::_1x1;
+ case GL_SHADING_RATE_1X2_PIXELS_QCOM:
+ return ShadingRate::_1x2;
+ case GL_SHADING_RATE_2X1_PIXELS_QCOM:
+ return ShadingRate::_2x1;
+ case GL_SHADING_RATE_2X2_PIXELS_QCOM:
+ return ShadingRate::_2x2;
+ case GL_SHADING_RATE_4X2_PIXELS_QCOM:
+ return ShadingRate::_4x2;
+ case GL_SHADING_RATE_4X4_PIXELS_QCOM:
+ return ShadingRate::_4x4;
+ default:
+ return ShadingRate::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(ShadingRate from)
+{
+ switch (from)
+ {
+ case ShadingRate::Undefined:
+ return GL_NONE;
+ case ShadingRate::_1x1:
+ return GL_SHADING_RATE_1X1_PIXELS_QCOM;
+ case ShadingRate::_1x2:
+ return GL_SHADING_RATE_1X2_PIXELS_QCOM;
+ case ShadingRate::_2x1:
+ return GL_SHADING_RATE_2X1_PIXELS_QCOM;
+ case ShadingRate::_2x2:
+ return GL_SHADING_RATE_2X2_PIXELS_QCOM;
+ case ShadingRate::_4x2:
+ return GL_SHADING_RATE_4X2_PIXELS_QCOM;
+ case ShadingRate::_4x4:
+ return GL_SHADING_RATE_4X4_PIXELS_QCOM;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, ShadingRate value)
+{
+ switch (value)
+ {
+ case ShadingRate::Undefined:
+ os << "GL_NONE";
+ break;
+ case ShadingRate::_1x1:
+ os << "GL_SHADING_RATE_1X1_PIXELS_QCOM";
+ break;
+ case ShadingRate::_1x2:
+ os << "GL_SHADING_RATE_1X2_PIXELS_QCOM";
+ break;
+ case ShadingRate::_2x1:
+ os << "GL_SHADING_RATE_2X1_PIXELS_QCOM";
+ break;
+ case ShadingRate::_2x2:
+ os << "GL_SHADING_RATE_2X2_PIXELS_QCOM";
+ break;
+ case ShadingRate::_4x2:
+ os << "GL_SHADING_RATE_4X2_PIXELS_QCOM";
+ break;
+ case ShadingRate::_4x4:
+ os << "GL_SHADING_RATE_4X4_PIXELS_QCOM";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+TextureCombine FromGLenum<TextureCombine>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_ADD:
+ return TextureCombine::Add;
+ case GL_ADD_SIGNED:
+ return TextureCombine::AddSigned;
+ case GL_DOT3_RGB:
+ return TextureCombine::Dot3Rgb;
+ case GL_DOT3_RGBA:
+ return TextureCombine::Dot3Rgba;
+ case GL_INTERPOLATE:
+ return TextureCombine::Interpolate;
+ case GL_MODULATE:
+ return TextureCombine::Modulate;
+ case GL_REPLACE:
+ return TextureCombine::Replace;
+ case GL_SUBTRACT:
+ return TextureCombine::Subtract;
+ default:
+ return TextureCombine::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(TextureCombine from)
+{
+ switch (from)
+ {
+ case TextureCombine::Add:
+ return GL_ADD;
+ case TextureCombine::AddSigned:
+ return GL_ADD_SIGNED;
+ case TextureCombine::Dot3Rgb:
+ return GL_DOT3_RGB;
+ case TextureCombine::Dot3Rgba:
+ return GL_DOT3_RGBA;
+ case TextureCombine::Interpolate:
+ return GL_INTERPOLATE;
+ case TextureCombine::Modulate:
+ return GL_MODULATE;
+ case TextureCombine::Replace:
+ return GL_REPLACE;
+ case TextureCombine::Subtract:
+ return GL_SUBTRACT;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, TextureCombine value)
+{
+ switch (value)
+ {
+ case TextureCombine::Add:
+ os << "GL_ADD";
+ break;
+ case TextureCombine::AddSigned:
+ os << "GL_ADD_SIGNED";
+ break;
+ case TextureCombine::Dot3Rgb:
+ os << "GL_DOT3_RGB";
+ break;
+ case TextureCombine::Dot3Rgba:
+ os << "GL_DOT3_RGBA";
+ break;
+ case TextureCombine::Interpolate:
+ os << "GL_INTERPOLATE";
+ break;
+ case TextureCombine::Modulate:
+ os << "GL_MODULATE";
+ break;
+ case TextureCombine::Replace:
+ os << "GL_REPLACE";
+ break;
+ case TextureCombine::Subtract:
+ os << "GL_SUBTRACT";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+TextureEnvMode FromGLenum<TextureEnvMode>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_ADD:
+ return TextureEnvMode::Add;
+ case GL_BLEND:
+ return TextureEnvMode::Blend;
+ case GL_COMBINE:
+ return TextureEnvMode::Combine;
+ case GL_DECAL:
+ return TextureEnvMode::Decal;
+ case GL_MODULATE:
+ return TextureEnvMode::Modulate;
+ case GL_REPLACE:
+ return TextureEnvMode::Replace;
+ default:
+ return TextureEnvMode::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(TextureEnvMode from)
+{
+ switch (from)
+ {
+ case TextureEnvMode::Add:
+ return GL_ADD;
+ case TextureEnvMode::Blend:
+ return GL_BLEND;
+ case TextureEnvMode::Combine:
+ return GL_COMBINE;
+ case TextureEnvMode::Decal:
+ return GL_DECAL;
+ case TextureEnvMode::Modulate:
+ return GL_MODULATE;
+ case TextureEnvMode::Replace:
+ return GL_REPLACE;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, TextureEnvMode value)
+{
+ switch (value)
+ {
+ case TextureEnvMode::Add:
+ os << "GL_ADD";
+ break;
+ case TextureEnvMode::Blend:
+ os << "GL_BLEND";
+ break;
+ case TextureEnvMode::Combine:
+ os << "GL_COMBINE";
+ break;
+ case TextureEnvMode::Decal:
+ os << "GL_DECAL";
+ break;
+ case TextureEnvMode::Modulate:
+ os << "GL_MODULATE";
+ break;
+ case TextureEnvMode::Replace:
+ os << "GL_REPLACE";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+TextureEnvParameter FromGLenum<TextureEnvParameter>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_TEXTURE_ENV_MODE:
+ return TextureEnvParameter::Mode;
+ case GL_TEXTURE_ENV_COLOR:
+ return TextureEnvParameter::Color;
+ case GL_COMBINE_RGB:
+ return TextureEnvParameter::CombineRgb;
+ case GL_COMBINE_ALPHA:
+ return TextureEnvParameter::CombineAlpha;
+ case GL_RGB_SCALE:
+ return TextureEnvParameter::RgbScale;
+ case GL_ALPHA_SCALE:
+ return TextureEnvParameter::AlphaScale;
+ case GL_SRC0_RGB:
+ return TextureEnvParameter::Src0Rgb;
+ case GL_SRC1_RGB:
+ return TextureEnvParameter::Src1Rgb;
+ case GL_SRC2_RGB:
+ return TextureEnvParameter::Src2Rgb;
+ case GL_SRC0_ALPHA:
+ return TextureEnvParameter::Src0Alpha;
+ case GL_SRC1_ALPHA:
+ return TextureEnvParameter::Src1Alpha;
+ case GL_SRC2_ALPHA:
+ return TextureEnvParameter::Src2Alpha;
+ case GL_OPERAND0_RGB:
+ return TextureEnvParameter::Op0Rgb;
+ case GL_OPERAND1_RGB:
+ return TextureEnvParameter::Op1Rgb;
+ case GL_OPERAND2_RGB:
+ return TextureEnvParameter::Op2Rgb;
+ case GL_OPERAND0_ALPHA:
+ return TextureEnvParameter::Op0Alpha;
+ case GL_OPERAND1_ALPHA:
+ return TextureEnvParameter::Op1Alpha;
+ case GL_OPERAND2_ALPHA:
+ return TextureEnvParameter::Op2Alpha;
+ case GL_COORD_REPLACE_OES:
+ return TextureEnvParameter::PointCoordReplace;
+ default:
+ return TextureEnvParameter::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(TextureEnvParameter from)
+{
+ switch (from)
+ {
+ case TextureEnvParameter::Mode:
+ return GL_TEXTURE_ENV_MODE;
+ case TextureEnvParameter::Color:
+ return GL_TEXTURE_ENV_COLOR;
+ case TextureEnvParameter::CombineRgb:
+ return GL_COMBINE_RGB;
+ case TextureEnvParameter::CombineAlpha:
+ return GL_COMBINE_ALPHA;
+ case TextureEnvParameter::RgbScale:
+ return GL_RGB_SCALE;
+ case TextureEnvParameter::AlphaScale:
+ return GL_ALPHA_SCALE;
+ case TextureEnvParameter::Src0Rgb:
+ return GL_SRC0_RGB;
+ case TextureEnvParameter::Src1Rgb:
+ return GL_SRC1_RGB;
+ case TextureEnvParameter::Src2Rgb:
+ return GL_SRC2_RGB;
+ case TextureEnvParameter::Src0Alpha:
+ return GL_SRC0_ALPHA;
+ case TextureEnvParameter::Src1Alpha:
+ return GL_SRC1_ALPHA;
+ case TextureEnvParameter::Src2Alpha:
+ return GL_SRC2_ALPHA;
+ case TextureEnvParameter::Op0Rgb:
+ return GL_OPERAND0_RGB;
+ case TextureEnvParameter::Op1Rgb:
+ return GL_OPERAND1_RGB;
+ case TextureEnvParameter::Op2Rgb:
+ return GL_OPERAND2_RGB;
+ case TextureEnvParameter::Op0Alpha:
+ return GL_OPERAND0_ALPHA;
+ case TextureEnvParameter::Op1Alpha:
+ return GL_OPERAND1_ALPHA;
+ case TextureEnvParameter::Op2Alpha:
+ return GL_OPERAND2_ALPHA;
+ case TextureEnvParameter::PointCoordReplace:
+ return GL_COORD_REPLACE_OES;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, TextureEnvParameter value)
+{
+ switch (value)
+ {
+ case TextureEnvParameter::Mode:
+ os << "GL_TEXTURE_ENV_MODE";
+ break;
+ case TextureEnvParameter::Color:
+ os << "GL_TEXTURE_ENV_COLOR";
+ break;
+ case TextureEnvParameter::CombineRgb:
+ os << "GL_COMBINE_RGB";
+ break;
+ case TextureEnvParameter::CombineAlpha:
+ os << "GL_COMBINE_ALPHA";
+ break;
+ case TextureEnvParameter::RgbScale:
+ os << "GL_RGB_SCALE";
+ break;
+ case TextureEnvParameter::AlphaScale:
+ os << "GL_ALPHA_SCALE";
+ break;
+ case TextureEnvParameter::Src0Rgb:
+ os << "GL_SRC0_RGB";
+ break;
+ case TextureEnvParameter::Src1Rgb:
+ os << "GL_SRC1_RGB";
+ break;
+ case TextureEnvParameter::Src2Rgb:
+ os << "GL_SRC2_RGB";
+ break;
+ case TextureEnvParameter::Src0Alpha:
+ os << "GL_SRC0_ALPHA";
+ break;
+ case TextureEnvParameter::Src1Alpha:
+ os << "GL_SRC1_ALPHA";
+ break;
+ case TextureEnvParameter::Src2Alpha:
+ os << "GL_SRC2_ALPHA";
+ break;
+ case TextureEnvParameter::Op0Rgb:
+ os << "GL_OPERAND0_RGB";
+ break;
+ case TextureEnvParameter::Op1Rgb:
+ os << "GL_OPERAND1_RGB";
+ break;
+ case TextureEnvParameter::Op2Rgb:
+ os << "GL_OPERAND2_RGB";
+ break;
+ case TextureEnvParameter::Op0Alpha:
+ os << "GL_OPERAND0_ALPHA";
+ break;
+ case TextureEnvParameter::Op1Alpha:
+ os << "GL_OPERAND1_ALPHA";
+ break;
+ case TextureEnvParameter::Op2Alpha:
+ os << "GL_OPERAND2_ALPHA";
+ break;
+ case TextureEnvParameter::PointCoordReplace:
+ os << "GL_COORD_REPLACE_OES";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+TextureEnvTarget FromGLenum<TextureEnvTarget>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_TEXTURE_ENV:
+ return TextureEnvTarget::Env;
+ case GL_POINT_SPRITE_OES:
+ return TextureEnvTarget::PointSprite;
+ default:
+ return TextureEnvTarget::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(TextureEnvTarget from)
+{
+ switch (from)
+ {
+ case TextureEnvTarget::Env:
+ return GL_TEXTURE_ENV;
+ case TextureEnvTarget::PointSprite:
+ return GL_POINT_SPRITE_OES;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, TextureEnvTarget value)
+{
+ switch (value)
+ {
+ case TextureEnvTarget::Env:
+ os << "GL_TEXTURE_ENV";
+ break;
+ case TextureEnvTarget::PointSprite:
+ os << "GL_POINT_SPRITE_OES";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+TextureOp FromGLenum<TextureOp>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_ONE_MINUS_SRC_ALPHA:
+ return TextureOp::OneMinusSrcAlpha;
+ case GL_ONE_MINUS_SRC_COLOR:
+ return TextureOp::OneMinusSrcColor;
+ case GL_SRC_ALPHA:
+ return TextureOp::SrcAlpha;
+ case GL_SRC_COLOR:
+ return TextureOp::SrcColor;
+ default:
+ return TextureOp::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(TextureOp from)
+{
+ switch (from)
+ {
+ case TextureOp::OneMinusSrcAlpha:
+ return GL_ONE_MINUS_SRC_ALPHA;
+ case TextureOp::OneMinusSrcColor:
+ return GL_ONE_MINUS_SRC_COLOR;
+ case TextureOp::SrcAlpha:
+ return GL_SRC_ALPHA;
+ case TextureOp::SrcColor:
+ return GL_SRC_COLOR;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, TextureOp value)
+{
+ switch (value)
+ {
+ case TextureOp::OneMinusSrcAlpha:
+ os << "GL_ONE_MINUS_SRC_ALPHA";
+ break;
+ case TextureOp::OneMinusSrcColor:
+ os << "GL_ONE_MINUS_SRC_COLOR";
+ break;
+ case TextureOp::SrcAlpha:
+ os << "GL_SRC_ALPHA";
+ break;
+ case TextureOp::SrcColor:
+ os << "GL_SRC_COLOR";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+TextureSrc FromGLenum<TextureSrc>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_CONSTANT:
+ return TextureSrc::Constant;
+ case GL_PREVIOUS:
+ return TextureSrc::Previous;
+ case GL_PRIMARY_COLOR:
+ return TextureSrc::PrimaryColor;
+ case GL_TEXTURE:
+ return TextureSrc::Texture;
+ default:
+ return TextureSrc::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(TextureSrc from)
+{
+ switch (from)
+ {
+ case TextureSrc::Constant:
+ return GL_CONSTANT;
+ case TextureSrc::Previous:
+ return GL_PREVIOUS;
+ case TextureSrc::PrimaryColor:
+ return GL_PRIMARY_COLOR;
+ case TextureSrc::Texture:
+ return GL_TEXTURE;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, TextureSrc value)
+{
+ switch (value)
+ {
+ case TextureSrc::Constant:
+ os << "GL_CONSTANT";
+ break;
+ case TextureSrc::Previous:
+ os << "GL_PREVIOUS";
+ break;
+ case TextureSrc::PrimaryColor:
+ os << "GL_PRIMARY_COLOR";
+ break;
+ case TextureSrc::Texture:
+ os << "GL_TEXTURE";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+TextureTarget FromGLenum<TextureTarget>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_TEXTURE_2D:
+ return TextureTarget::_2D;
+ case GL_TEXTURE_2D_ARRAY:
+ return TextureTarget::_2DArray;
+ case GL_TEXTURE_2D_MULTISAMPLE:
+ return TextureTarget::_2DMultisample;
+ case GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES:
+ return TextureTarget::_2DMultisampleArray;
+ case GL_TEXTURE_3D:
+ return TextureTarget::_3D;
+ case GL_TEXTURE_EXTERNAL_OES:
+ return TextureTarget::External;
+ case GL_TEXTURE_RECTANGLE_ANGLE:
+ return TextureTarget::Rectangle;
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
+ return TextureTarget::CubeMapPositiveX;
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
+ return TextureTarget::CubeMapNegativeX;
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
+ return TextureTarget::CubeMapPositiveY;
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
+ return TextureTarget::CubeMapNegativeY;
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
+ return TextureTarget::CubeMapPositiveZ;
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
+ return TextureTarget::CubeMapNegativeZ;
+ case GL_TEXTURE_CUBE_MAP_ARRAY:
+ return TextureTarget::CubeMapArray;
+ case GL_TEXTURE_VIDEO_IMAGE_WEBGL:
+ return TextureTarget::VideoImage;
+ case GL_TEXTURE_BUFFER:
+ return TextureTarget::Buffer;
+ default:
+ return TextureTarget::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(TextureTarget from)
+{
+ switch (from)
+ {
+ case TextureTarget::_2D:
+ return GL_TEXTURE_2D;
+ case TextureTarget::_2DArray:
+ return GL_TEXTURE_2D_ARRAY;
+ case TextureTarget::_2DMultisample:
+ return GL_TEXTURE_2D_MULTISAMPLE;
+ case TextureTarget::_2DMultisampleArray:
+ return GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES;
+ case TextureTarget::_3D:
+ return GL_TEXTURE_3D;
+ case TextureTarget::External:
+ return GL_TEXTURE_EXTERNAL_OES;
+ case TextureTarget::Rectangle:
+ return GL_TEXTURE_RECTANGLE_ANGLE;
+ case TextureTarget::CubeMapPositiveX:
+ return GL_TEXTURE_CUBE_MAP_POSITIVE_X;
+ case TextureTarget::CubeMapNegativeX:
+ return GL_TEXTURE_CUBE_MAP_NEGATIVE_X;
+ case TextureTarget::CubeMapPositiveY:
+ return GL_TEXTURE_CUBE_MAP_POSITIVE_Y;
+ case TextureTarget::CubeMapNegativeY:
+ return GL_TEXTURE_CUBE_MAP_NEGATIVE_Y;
+ case TextureTarget::CubeMapPositiveZ:
+ return GL_TEXTURE_CUBE_MAP_POSITIVE_Z;
+ case TextureTarget::CubeMapNegativeZ:
+ return GL_TEXTURE_CUBE_MAP_NEGATIVE_Z;
+ case TextureTarget::CubeMapArray:
+ return GL_TEXTURE_CUBE_MAP_ARRAY;
+ case TextureTarget::VideoImage:
+ return GL_TEXTURE_VIDEO_IMAGE_WEBGL;
+ case TextureTarget::Buffer:
+ return GL_TEXTURE_BUFFER;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, TextureTarget value)
+{
+ switch (value)
+ {
+ case TextureTarget::_2D:
+ os << "GL_TEXTURE_2D";
+ break;
+ case TextureTarget::_2DArray:
+ os << "GL_TEXTURE_2D_ARRAY";
+ break;
+ case TextureTarget::_2DMultisample:
+ os << "GL_TEXTURE_2D_MULTISAMPLE";
+ break;
+ case TextureTarget::_2DMultisampleArray:
+ os << "GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES";
+ break;
+ case TextureTarget::_3D:
+ os << "GL_TEXTURE_3D";
+ break;
+ case TextureTarget::External:
+ os << "GL_TEXTURE_EXTERNAL_OES";
+ break;
+ case TextureTarget::Rectangle:
+ os << "GL_TEXTURE_RECTANGLE_ANGLE";
+ break;
+ case TextureTarget::CubeMapPositiveX:
+ os << "GL_TEXTURE_CUBE_MAP_POSITIVE_X";
+ break;
+ case TextureTarget::CubeMapNegativeX:
+ os << "GL_TEXTURE_CUBE_MAP_NEGATIVE_X";
+ break;
+ case TextureTarget::CubeMapPositiveY:
+ os << "GL_TEXTURE_CUBE_MAP_POSITIVE_Y";
+ break;
+ case TextureTarget::CubeMapNegativeY:
+ os << "GL_TEXTURE_CUBE_MAP_NEGATIVE_Y";
+ break;
+ case TextureTarget::CubeMapPositiveZ:
+ os << "GL_TEXTURE_CUBE_MAP_POSITIVE_Z";
+ break;
+ case TextureTarget::CubeMapNegativeZ:
+ os << "GL_TEXTURE_CUBE_MAP_NEGATIVE_Z";
+ break;
+ case TextureTarget::CubeMapArray:
+ os << "GL_TEXTURE_CUBE_MAP_ARRAY";
+ break;
+ case TextureTarget::VideoImage:
+ os << "GL_TEXTURE_VIDEO_IMAGE_WEBGL";
+ break;
+ case TextureTarget::Buffer:
+ os << "GL_TEXTURE_BUFFER";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+TextureType FromGLenum<TextureType>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_TEXTURE_2D:
+ return TextureType::_2D;
+ case GL_TEXTURE_2D_ARRAY:
+ return TextureType::_2DArray;
+ case GL_TEXTURE_2D_MULTISAMPLE:
+ return TextureType::_2DMultisample;
+ case GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES:
+ return TextureType::_2DMultisampleArray;
+ case GL_TEXTURE_3D:
+ return TextureType::_3D;
+ case GL_TEXTURE_EXTERNAL_OES:
+ return TextureType::External;
+ case GL_TEXTURE_RECTANGLE_ANGLE:
+ return TextureType::Rectangle;
+ case GL_TEXTURE_CUBE_MAP:
+ return TextureType::CubeMap;
+ case GL_TEXTURE_CUBE_MAP_ARRAY:
+ return TextureType::CubeMapArray;
+ case GL_TEXTURE_VIDEO_IMAGE_WEBGL:
+ return TextureType::VideoImage;
+ case GL_TEXTURE_BUFFER:
+ return TextureType::Buffer;
+ default:
+ return TextureType::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(TextureType from)
+{
+ switch (from)
+ {
+ case TextureType::_2D:
+ return GL_TEXTURE_2D;
+ case TextureType::_2DArray:
+ return GL_TEXTURE_2D_ARRAY;
+ case TextureType::_2DMultisample:
+ return GL_TEXTURE_2D_MULTISAMPLE;
+ case TextureType::_2DMultisampleArray:
+ return GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES;
+ case TextureType::_3D:
+ return GL_TEXTURE_3D;
+ case TextureType::External:
+ return GL_TEXTURE_EXTERNAL_OES;
+ case TextureType::Rectangle:
+ return GL_TEXTURE_RECTANGLE_ANGLE;
+ case TextureType::CubeMap:
+ return GL_TEXTURE_CUBE_MAP;
+ case TextureType::CubeMapArray:
+ return GL_TEXTURE_CUBE_MAP_ARRAY;
+ case TextureType::VideoImage:
+ return GL_TEXTURE_VIDEO_IMAGE_WEBGL;
+ case TextureType::Buffer:
+ return GL_TEXTURE_BUFFER;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, TextureType value)
+{
+ switch (value)
+ {
+ case TextureType::_2D:
+ os << "GL_TEXTURE_2D";
+ break;
+ case TextureType::_2DArray:
+ os << "GL_TEXTURE_2D_ARRAY";
+ break;
+ case TextureType::_2DMultisample:
+ os << "GL_TEXTURE_2D_MULTISAMPLE";
+ break;
+ case TextureType::_2DMultisampleArray:
+ os << "GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES";
+ break;
+ case TextureType::_3D:
+ os << "GL_TEXTURE_3D";
+ break;
+ case TextureType::External:
+ os << "GL_TEXTURE_EXTERNAL_OES";
+ break;
+ case TextureType::Rectangle:
+ os << "GL_TEXTURE_RECTANGLE_ANGLE";
+ break;
+ case TextureType::CubeMap:
+ os << "GL_TEXTURE_CUBE_MAP";
+ break;
+ case TextureType::CubeMapArray:
+ os << "GL_TEXTURE_CUBE_MAP_ARRAY";
+ break;
+ case TextureType::VideoImage:
+ os << "GL_TEXTURE_VIDEO_IMAGE_WEBGL";
+ break;
+ case TextureType::Buffer:
+ os << "GL_TEXTURE_BUFFER";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+VertexArrayType FromGLenum<VertexArrayType>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_COLOR_ARRAY:
+ return VertexArrayType::Color;
+ case GL_NORMAL_ARRAY:
+ return VertexArrayType::Normal;
+ case GL_POINT_SIZE_ARRAY_OES:
+ return VertexArrayType::PointSize;
+ case GL_TEXTURE_COORD_ARRAY:
+ return VertexArrayType::TextureCoord;
+ case GL_VERTEX_ARRAY:
+ return VertexArrayType::Vertex;
+ default:
+ return VertexArrayType::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(VertexArrayType from)
+{
+ switch (from)
+ {
+ case VertexArrayType::Color:
+ return GL_COLOR_ARRAY;
+ case VertexArrayType::Normal:
+ return GL_NORMAL_ARRAY;
+ case VertexArrayType::PointSize:
+ return GL_POINT_SIZE_ARRAY_OES;
+ case VertexArrayType::TextureCoord:
+ return GL_TEXTURE_COORD_ARRAY;
+ case VertexArrayType::Vertex:
+ return GL_VERTEX_ARRAY;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, VertexArrayType value)
+{
+ switch (value)
+ {
+ case VertexArrayType::Color:
+ os << "GL_COLOR_ARRAY";
+ break;
+ case VertexArrayType::Normal:
+ os << "GL_NORMAL_ARRAY";
+ break;
+ case VertexArrayType::PointSize:
+ os << "GL_POINT_SIZE_ARRAY_OES";
+ break;
+ case VertexArrayType::TextureCoord:
+ os << "GL_TEXTURE_COORD_ARRAY";
+ break;
+ case VertexArrayType::Vertex:
+ os << "GL_VERTEX_ARRAY";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+template <>
+WrapMode FromGLenum<WrapMode>(GLenum from)
+{
+ switch (from)
+ {
+ case GL_CLAMP_TO_EDGE:
+ return WrapMode::ClampToEdge;
+ case GL_CLAMP_TO_BORDER:
+ return WrapMode::ClampToBorder;
+ case GL_MIRRORED_REPEAT:
+ return WrapMode::MirroredRepeat;
+ case GL_REPEAT:
+ return WrapMode::Repeat;
+ default:
+ return WrapMode::InvalidEnum;
+ }
+}
+
+GLenum ToGLenum(WrapMode from)
+{
+ switch (from)
+ {
+ case WrapMode::ClampToEdge:
+ return GL_CLAMP_TO_EDGE;
+ case WrapMode::ClampToBorder:
+ return GL_CLAMP_TO_BORDER;
+ case WrapMode::MirroredRepeat:
+ return GL_MIRRORED_REPEAT;
+ case WrapMode::Repeat:
+ return GL_REPEAT;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::ostream &operator<<(std::ostream &os, WrapMode value)
+{
+ switch (value)
+ {
+ case WrapMode::ClampToEdge:
+ os << "GL_CLAMP_TO_EDGE";
+ break;
+ case WrapMode::ClampToBorder:
+ os << "GL_CLAMP_TO_BORDER";
+ break;
+ case WrapMode::MirroredRepeat:
+ os << "GL_MIRRORED_REPEAT";
+ break;
+ case WrapMode::Repeat:
+ os << "GL_REPEAT";
+ break;
+ default:
+ os << "GL_INVALID_ENUM";
+ break;
+ }
+ return os;
+}
+
+} // namespace gl
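
A minimal usage sketch of the conversion helpers defined above, assuming <angle_gl.h> and the generated header below are on the include path. FromGLenum maps unrecognized values to InvalidEnum, ToGLenum recovers the original constant, and operator<< prints the GL_* name; the describeTextureTarget helper and its logging are illustrative, not part of ANGLE.

#include <iostream>

#include <angle_gl.h>
#include "common/PackedGLEnums_autogen.h"

// Hypothetical helper: classify a raw GLenum texture target received from a caller.
void describeTextureTarget(GLenum raw)
{
    gl::TextureTarget target = gl::FromGLenum<gl::TextureTarget>(raw);
    if (target == gl::TextureTarget::InvalidEnum)
    {
        std::cout << "unrecognized target 0x" << std::hex << raw << "\n";
        return;
    }
    // operator<< prints the GL_* name; ToGLenum round-trips back to the raw constant.
    std::cout << target << " = 0x" << std::hex << gl::ToGLenum(target) << "\n";
}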
diff --git a/gfx/angle/checkout/src/common/PackedGLEnums_autogen.h b/gfx/angle/checkout/src/common/PackedGLEnums_autogen.h
new file mode 100644
index 0000000000..452dca344e
--- /dev/null
+++ b/gfx/angle/checkout/src/common/PackedGLEnums_autogen.h
@@ -0,0 +1,610 @@
+// GENERATED FILE - DO NOT EDIT.
+// Generated by gen_packed_gl_enums.py using data from packed_gl_enums.json.
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// PackedGLEnums_autogen.h:
+// Declares ANGLE-specific enum classes for GLenums and functions operating
+// on them.
+
+#ifndef COMMON_PACKEDGLENUMS_AUTOGEN_H_
+#define COMMON_PACKEDGLENUMS_AUTOGEN_H_
+
+#include <angle_gl.h>
+
+#include <cstdint>
+#include <ostream>
+
+namespace gl
+{
+
+template <typename Enum>
+Enum FromGLenum(GLenum from);
+
+enum class AlphaTestFunc : uint8_t
+{
+ AlwaysPass = 0,
+ Equal = 1,
+ Gequal = 2,
+ Greater = 3,
+ Lequal = 4,
+ Less = 5,
+ Never = 6,
+ NotEqual = 7,
+
+ InvalidEnum = 8,
+ EnumCount = 8,
+};
+
+template <>
+AlphaTestFunc FromGLenum<AlphaTestFunc>(GLenum from);
+GLenum ToGLenum(AlphaTestFunc from);
+std::ostream &operator<<(std::ostream &os, AlphaTestFunc value);
+
+enum class BufferBinding : uint8_t
+{
+ Array = 0,
+ AtomicCounter = 1,
+ CopyRead = 2,
+ CopyWrite = 3,
+ DispatchIndirect = 4,
+ DrawIndirect = 5,
+ ElementArray = 6,
+ PixelPack = 7,
+ PixelUnpack = 8,
+ ShaderStorage = 9,
+ Texture = 10,
+ TransformFeedback = 11,
+ Uniform = 12,
+
+ InvalidEnum = 13,
+ EnumCount = 13,
+};
+
+template <>
+BufferBinding FromGLenum<BufferBinding>(GLenum from);
+GLenum ToGLenum(BufferBinding from);
+std::ostream &operator<<(std::ostream &os, BufferBinding value);
+
+enum class BufferUsage : uint8_t
+{
+ DynamicCopy = 0,
+ DynamicDraw = 1,
+ DynamicRead = 2,
+ StaticCopy = 3,
+ StaticDraw = 4,
+ StaticRead = 5,
+ StreamCopy = 6,
+ StreamDraw = 7,
+ StreamRead = 8,
+
+ InvalidEnum = 9,
+ EnumCount = 9,
+};
+
+template <>
+BufferUsage FromGLenum<BufferUsage>(GLenum from);
+GLenum ToGLenum(BufferUsage from);
+std::ostream &operator<<(std::ostream &os, BufferUsage value);
+
+enum class ClientVertexArrayType : uint8_t
+{
+ Color = 0,
+ Normal = 1,
+ PointSize = 2,
+ TextureCoord = 3,
+ Vertex = 4,
+
+ InvalidEnum = 5,
+ EnumCount = 5,
+};
+
+template <>
+ClientVertexArrayType FromGLenum<ClientVertexArrayType>(GLenum from);
+GLenum ToGLenum(ClientVertexArrayType from);
+std::ostream &operator<<(std::ostream &os, ClientVertexArrayType value);
+
+enum class CullFaceMode : uint8_t
+{
+ Back = 0,
+ Front = 1,
+ FrontAndBack = 2,
+
+ InvalidEnum = 3,
+ EnumCount = 3,
+};
+
+template <>
+CullFaceMode FromGLenum<CullFaceMode>(GLenum from);
+GLenum ToGLenum(CullFaceMode from);
+std::ostream &operator<<(std::ostream &os, CullFaceMode value);
+
+enum class FilterMode : uint8_t
+{
+ Nearest = 0,
+ Linear = 1,
+ NearestMipmapNearest = 2,
+ NearestMipmapLinear = 3,
+ LinearMipmapLinear = 4,
+
+ InvalidEnum = 5,
+ EnumCount = 5,
+};
+
+template <>
+FilterMode FromGLenum<FilterMode>(GLenum from);
+GLenum ToGLenum(FilterMode from);
+std::ostream &operator<<(std::ostream &os, FilterMode value);
+
+enum class FogMode : uint8_t
+{
+ Exp = 0,
+ Exp2 = 1,
+ Linear = 2,
+
+ InvalidEnum = 3,
+ EnumCount = 3,
+};
+
+template <>
+FogMode FromGLenum<FogMode>(GLenum from);
+GLenum ToGLenum(FogMode from);
+std::ostream &operator<<(std::ostream &os, FogMode value);
+
+enum class GraphicsResetStatus : uint8_t
+{
+ NoError = 0,
+ GuiltyContextReset = 1,
+ InnocentContextReset = 2,
+ UnknownContextReset = 3,
+ PurgedContextResetNV = 4,
+
+ InvalidEnum = 5,
+ EnumCount = 5,
+};
+
+template <>
+GraphicsResetStatus FromGLenum<GraphicsResetStatus>(GLenum from);
+GLenum ToGLenum(GraphicsResetStatus from);
+std::ostream &operator<<(std::ostream &os, GraphicsResetStatus value);
+
+enum class HandleType : uint8_t
+{
+ OpaqueFd = 0,
+ ZirconVmo = 1,
+ ZirconEvent = 2,
+
+ InvalidEnum = 3,
+ EnumCount = 3,
+};
+
+template <>
+HandleType FromGLenum<HandleType>(GLenum from);
+GLenum ToGLenum(HandleType from);
+std::ostream &operator<<(std::ostream &os, HandleType value);
+
+enum class HintSetting : uint8_t
+{
+ DontCare = 0,
+ Fastest = 1,
+ Nicest = 2,
+
+ InvalidEnum = 3,
+ EnumCount = 3,
+};
+
+template <>
+HintSetting FromGLenum<HintSetting>(GLenum from);
+GLenum ToGLenum(HintSetting from);
+std::ostream &operator<<(std::ostream &os, HintSetting value);
+
+enum class ImageLayout : uint8_t
+{
+ Undefined = 0,
+ General = 1,
+ ColorAttachment = 2,
+ DepthStencilAttachment = 3,
+ DepthStencilReadOnlyAttachment = 4,
+ ShaderReadOnly = 5,
+ TransferSrc = 6,
+ TransferDst = 7,
+ DepthReadOnlyStencilAttachment = 8,
+ DepthAttachmentStencilReadOnly = 9,
+
+ InvalidEnum = 10,
+ EnumCount = 10,
+};
+
+template <>
+ImageLayout FromGLenum<ImageLayout>(GLenum from);
+GLenum ToGLenum(ImageLayout from);
+std::ostream &operator<<(std::ostream &os, ImageLayout value);
+
+enum class LightParameter : uint8_t
+{
+ Ambient = 0,
+ AmbientAndDiffuse = 1,
+ ConstantAttenuation = 2,
+ Diffuse = 3,
+ LinearAttenuation = 4,
+ Position = 5,
+ QuadraticAttenuation = 6,
+ Specular = 7,
+ SpotCutoff = 8,
+ SpotDirection = 9,
+ SpotExponent = 10,
+
+ InvalidEnum = 11,
+ EnumCount = 11,
+};
+
+template <>
+LightParameter FromGLenum<LightParameter>(GLenum from);
+GLenum ToGLenum(LightParameter from);
+std::ostream &operator<<(std::ostream &os, LightParameter value);
+
+enum class LogicalOperation : uint8_t
+{
+ And = 0,
+ AndInverted = 1,
+ AndReverse = 2,
+ Clear = 3,
+ Copy = 4,
+ CopyInverted = 5,
+ Equiv = 6,
+ Invert = 7,
+ Nand = 8,
+ Noop = 9,
+ Nor = 10,
+ Or = 11,
+ OrInverted = 12,
+ OrReverse = 13,
+ Set = 14,
+ Xor = 15,
+
+ InvalidEnum = 16,
+ EnumCount = 16,
+};
+
+template <>
+LogicalOperation FromGLenum<LogicalOperation>(GLenum from);
+GLenum ToGLenum(LogicalOperation from);
+std::ostream &operator<<(std::ostream &os, LogicalOperation value);
+
+enum class MaterialParameter : uint8_t
+{
+ Ambient = 0,
+ AmbientAndDiffuse = 1,
+ Diffuse = 2,
+ Emission = 3,
+ Shininess = 4,
+ Specular = 5,
+
+ InvalidEnum = 6,
+ EnumCount = 6,
+};
+
+template <>
+MaterialParameter FromGLenum<MaterialParameter>(GLenum from);
+GLenum ToGLenum(MaterialParameter from);
+std::ostream &operator<<(std::ostream &os, MaterialParameter value);
+
+enum class MatrixType : uint8_t
+{
+ Modelview = 0,
+ Projection = 1,
+ Texture = 2,
+
+ InvalidEnum = 3,
+ EnumCount = 3,
+};
+
+template <>
+MatrixType FromGLenum<MatrixType>(GLenum from);
+GLenum ToGLenum(MatrixType from);
+std::ostream &operator<<(std::ostream &os, MatrixType value);
+
+enum class PointParameter : uint8_t
+{
+ PointSizeMin = 0,
+ PointSizeMax = 1,
+ PointFadeThresholdSize = 2,
+ PointDistanceAttenuation = 3,
+
+ InvalidEnum = 4,
+ EnumCount = 4,
+};
+
+template <>
+PointParameter FromGLenum<PointParameter>(GLenum from);
+GLenum ToGLenum(PointParameter from);
+std::ostream &operator<<(std::ostream &os, PointParameter value);
+
+enum class ProvokingVertexConvention : uint8_t
+{
+ FirstVertexConvention = 0,
+ LastVertexConvention = 1,
+
+ InvalidEnum = 2,
+ EnumCount = 2,
+};
+
+template <>
+ProvokingVertexConvention FromGLenum<ProvokingVertexConvention>(GLenum from);
+GLenum ToGLenum(ProvokingVertexConvention from);
+std::ostream &operator<<(std::ostream &os, ProvokingVertexConvention value);
+
+enum class QueryType : uint8_t
+{
+ AnySamples = 0,
+ AnySamplesConservative = 1,
+ CommandsCompleted = 2,
+ PrimitivesGenerated = 3,
+ TimeElapsed = 4,
+ Timestamp = 5,
+ TransformFeedbackPrimitivesWritten = 6,
+
+ InvalidEnum = 7,
+ EnumCount = 7,
+};
+
+template <>
+QueryType FromGLenum<QueryType>(GLenum from);
+GLenum ToGLenum(QueryType from);
+std::ostream &operator<<(std::ostream &os, QueryType value);
+
+enum class ShaderType : uint8_t
+{
+ Vertex = 0,
+ TessControl = 1,
+ TessEvaluation = 2,
+ Geometry = 3,
+ Fragment = 4,
+ Compute = 5,
+
+ InvalidEnum = 6,
+ EnumCount = 6,
+};
+
+template <>
+ShaderType FromGLenum<ShaderType>(GLenum from);
+GLenum ToGLenum(ShaderType from);
+std::ostream &operator<<(std::ostream &os, ShaderType value);
+
+enum class ShadingModel : uint8_t
+{
+ Flat = 0,
+ Smooth = 1,
+
+ InvalidEnum = 2,
+ EnumCount = 2,
+};
+
+template <>
+ShadingModel FromGLenum<ShadingModel>(GLenum from);
+GLenum ToGLenum(ShadingModel from);
+std::ostream &operator<<(std::ostream &os, ShadingModel value);
+
+enum class ShadingRate : uint8_t
+{
+ Undefined = 0,
+ _1x1 = 1,
+ _1x2 = 2,
+ _2x1 = 3,
+ _2x2 = 4,
+ _4x2 = 5,
+ _4x4 = 6,
+
+ InvalidEnum = 7,
+ EnumCount = 7,
+};
+
+template <>
+ShadingRate FromGLenum<ShadingRate>(GLenum from);
+GLenum ToGLenum(ShadingRate from);
+std::ostream &operator<<(std::ostream &os, ShadingRate value);
+
+enum class TextureCombine : uint8_t
+{
+ Add = 0,
+ AddSigned = 1,
+ Dot3Rgb = 2,
+ Dot3Rgba = 3,
+ Interpolate = 4,
+ Modulate = 5,
+ Replace = 6,
+ Subtract = 7,
+
+ InvalidEnum = 8,
+ EnumCount = 8,
+};
+
+template <>
+TextureCombine FromGLenum<TextureCombine>(GLenum from);
+GLenum ToGLenum(TextureCombine from);
+std::ostream &operator<<(std::ostream &os, TextureCombine value);
+
+enum class TextureEnvMode : uint8_t
+{
+ Add = 0,
+ Blend = 1,
+ Combine = 2,
+ Decal = 3,
+ Modulate = 4,
+ Replace = 5,
+
+ InvalidEnum = 6,
+ EnumCount = 6,
+};
+
+template <>
+TextureEnvMode FromGLenum<TextureEnvMode>(GLenum from);
+GLenum ToGLenum(TextureEnvMode from);
+std::ostream &operator<<(std::ostream &os, TextureEnvMode value);
+
+enum class TextureEnvParameter : uint8_t
+{
+ Mode = 0,
+ Color = 1,
+ CombineRgb = 2,
+ CombineAlpha = 3,
+ RgbScale = 4,
+ AlphaScale = 5,
+ Src0Rgb = 6,
+ Src1Rgb = 7,
+ Src2Rgb = 8,
+ Src0Alpha = 9,
+ Src1Alpha = 10,
+ Src2Alpha = 11,
+ Op0Rgb = 12,
+ Op1Rgb = 13,
+ Op2Rgb = 14,
+ Op0Alpha = 15,
+ Op1Alpha = 16,
+ Op2Alpha = 17,
+ PointCoordReplace = 18,
+
+ InvalidEnum = 19,
+ EnumCount = 19,
+};
+
+template <>
+TextureEnvParameter FromGLenum<TextureEnvParameter>(GLenum from);
+GLenum ToGLenum(TextureEnvParameter from);
+std::ostream &operator<<(std::ostream &os, TextureEnvParameter value);
+
+enum class TextureEnvTarget : uint8_t
+{
+ Env = 0,
+ PointSprite = 1,
+
+ InvalidEnum = 2,
+ EnumCount = 2,
+};
+
+template <>
+TextureEnvTarget FromGLenum<TextureEnvTarget>(GLenum from);
+GLenum ToGLenum(TextureEnvTarget from);
+std::ostream &operator<<(std::ostream &os, TextureEnvTarget value);
+
+enum class TextureOp : uint8_t
+{
+ OneMinusSrcAlpha = 0,
+ OneMinusSrcColor = 1,
+ SrcAlpha = 2,
+ SrcColor = 3,
+
+ InvalidEnum = 4,
+ EnumCount = 4,
+};
+
+template <>
+TextureOp FromGLenum<TextureOp>(GLenum from);
+GLenum ToGLenum(TextureOp from);
+std::ostream &operator<<(std::ostream &os, TextureOp value);
+
+enum class TextureSrc : uint8_t
+{
+ Constant = 0,
+ Previous = 1,
+ PrimaryColor = 2,
+ Texture = 3,
+
+ InvalidEnum = 4,
+ EnumCount = 4,
+};
+
+template <>
+TextureSrc FromGLenum<TextureSrc>(GLenum from);
+GLenum ToGLenum(TextureSrc from);
+std::ostream &operator<<(std::ostream &os, TextureSrc value);
+
+enum class TextureTarget : uint8_t
+{
+ _2D = 0,
+ _2DArray = 1,
+ _2DMultisample = 2,
+ _2DMultisampleArray = 3,
+ _3D = 4,
+ External = 5,
+ Rectangle = 6,
+ CubeMapPositiveX = 7,
+ CubeMapNegativeX = 8,
+ CubeMapPositiveY = 9,
+ CubeMapNegativeY = 10,
+ CubeMapPositiveZ = 11,
+ CubeMapNegativeZ = 12,
+ CubeMapArray = 13,
+ VideoImage = 14,
+ Buffer = 15,
+
+ InvalidEnum = 16,
+ EnumCount = 16,
+};
+
+template <>
+TextureTarget FromGLenum<TextureTarget>(GLenum from);
+GLenum ToGLenum(TextureTarget from);
+std::ostream &operator<<(std::ostream &os, TextureTarget value);
+
+enum class TextureType : uint8_t
+{
+ _2D = 0,
+ _2DArray = 1,
+ _2DMultisample = 2,
+ _2DMultisampleArray = 3,
+ _3D = 4,
+ External = 5,
+ Rectangle = 6,
+ CubeMap = 7,
+ CubeMapArray = 8,
+ VideoImage = 9,
+ Buffer = 10,
+
+ InvalidEnum = 11,
+ EnumCount = 11,
+};
+
+template <>
+TextureType FromGLenum<TextureType>(GLenum from);
+GLenum ToGLenum(TextureType from);
+std::ostream &operator<<(std::ostream &os, TextureType value);
+
+enum class VertexArrayType : uint8_t
+{
+ Color = 0,
+ Normal = 1,
+ PointSize = 2,
+ TextureCoord = 3,
+ Vertex = 4,
+
+ InvalidEnum = 5,
+ EnumCount = 5,
+};
+
+template <>
+VertexArrayType FromGLenum<VertexArrayType>(GLenum from);
+GLenum ToGLenum(VertexArrayType from);
+std::ostream &operator<<(std::ostream &os, VertexArrayType value);
+
+enum class WrapMode : uint8_t
+{
+ ClampToEdge = 0,
+ ClampToBorder = 1,
+ MirroredRepeat = 2,
+ Repeat = 3,
+
+ InvalidEnum = 4,
+ EnumCount = 4,
+};
+
+template <>
+WrapMode FromGLenum<WrapMode>(GLenum from);
+GLenum ToGLenum(WrapMode from);
+std::ostream &operator<<(std::ostream &os, WrapMode value);
+
+} // namespace gl
+
+#endif // COMMON_PACKEDGLENUMS_AUTOGEN_H_
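
Each packed enum above places InvalidEnum one past the last valid value and exposes EnumCount, so per-enum data can be kept in a dense array indexed by the packed value. A short sketch under that assumption; the stage-name table is invented for illustration and is not part of ANGLE.

#include <array>
#include <cstddef>

#include "common/PackedGLEnums_autogen.h"

// Dense lookup table indexed by the packed ShaderType value (illustrative contents).
constexpr std::array<const char *, static_cast<std::size_t>(gl::ShaderType::EnumCount)>
    kStageNames = {"vertex", "tess control", "tess evaluation", "geometry", "fragment", "compute"};

const char *StageName(gl::ShaderType type)
{
    // InvalidEnum deliberately falls outside the table; callers are expected to check it first.
    return kStageNames[static_cast<std::size_t>(type)];
}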
diff --git a/gfx/angle/checkout/src/common/PoolAlloc.cpp b/gfx/angle/checkout/src/common/PoolAlloc.cpp
new file mode 100644
index 0000000000..eef033ca04
--- /dev/null
+++ b/gfx/angle/checkout/src/common/PoolAlloc.cpp
@@ -0,0 +1,487 @@
+//
+// Copyright 2019 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// PoolAlloc.cpp:
+// Implements the class methods for PoolAllocator and Allocation classes.
+//
+
+#include "common/PoolAlloc.h"
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "common/angleutils.h"
+#include "common/debug.h"
+#include "common/mathutil.h"
+#include "common/platform.h"
+#include "common/tls.h"
+
+namespace angle
+{
+// If we are using guard blocks, we must track each individual allocation. If we aren't using guard
+// blocks, these never get instantiated, so won't have any impact.
+
+class Allocation
+{
+ public:
+ Allocation(size_t size, unsigned char *mem, Allocation *prev = 0)
+ : mSize(size), mMem(mem), mPrevAlloc(prev)
+ {
+ // Allocations are bracketed:
+ //
+ // [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
+ //
+ // This would be cleaner with if (kGuardBlockSize)..., but that makes the compiler print
+ // warnings about 0 length memsets, even with the if() protecting them.
+#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
+ memset(preGuard(), kGuardBlockBeginVal, kGuardBlockSize);
+ memset(data(), kUserDataFill, mSize);
+ memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
+#endif
+ }
+
+ void checkAllocList() const;
+
+ static size_t AlignedHeaderSize(uint8_t *allocationBasePtr, size_t alignment)
+ {
+ // Make sure that the data offset after the header is aligned to the given alignment.
+ size_t base = reinterpret_cast<size_t>(allocationBasePtr);
+ return rx::roundUpPow2(base + kGuardBlockSize + HeaderSize(), alignment) - base;
+ }
+
+ // Return total size needed to accommodate user buffer of 'size',
+ // plus our tracking data and any necessary alignments.
+ static size_t AllocationSize(uint8_t *allocationBasePtr,
+ size_t size,
+ size_t alignment,
+ size_t *preAllocationPaddingOut)
+ {
+ // The allocation will be laid out as such:
+ //
+ // Aligned to |alignment|
+ // ^
+ // preAllocationPaddingOut |
+ // ___^___ |
+ // / \ |
+ // <padding>[header][guard][data][guard]
+ // \___________ __________/
+ // V
+ // dataOffset
+ //
+ // Note that alignment is at least as much as a pointer alignment, so the pointers in the
+ // header are also necessarily aligned appropriately.
+ //
+ size_t dataOffset = AlignedHeaderSize(allocationBasePtr, alignment);
+ *preAllocationPaddingOut = dataOffset - HeaderSize() - kGuardBlockSize;
+
+ return dataOffset + size + kGuardBlockSize;
+ }
+
+ // Given memory pointing to |header|, returns |data|.
+ static uint8_t *GetDataPointer(uint8_t *memory, size_t alignment)
+ {
+ uint8_t *alignedPtr = memory + kGuardBlockSize + HeaderSize();
+
+ // |memory| must be aligned already such that user data is aligned to |alignment|.
+ ASSERT((reinterpret_cast<uintptr_t>(alignedPtr) & (alignment - 1)) == 0);
+
+ return alignedPtr;
+ }
+
+ private:
+ void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;
+
+ void checkAlloc() const
+ {
+ checkGuardBlock(preGuard(), kGuardBlockBeginVal, "before");
+ checkGuardBlock(postGuard(), kGuardBlockEndVal, "after");
+ }
+
+ // Find offsets to pre and post guard blocks, and user data buffer
+ unsigned char *preGuard() const { return mMem + HeaderSize(); }
+ unsigned char *data() const { return preGuard() + kGuardBlockSize; }
+ unsigned char *postGuard() const { return data() + mSize; }
+ size_t mSize; // size of the user data area
+ unsigned char *mMem; // beginning of our allocation (points to header)
+ Allocation *mPrevAlloc; // prior allocation in the chain
+
+ static constexpr unsigned char kGuardBlockBeginVal = 0xfb;
+ static constexpr unsigned char kGuardBlockEndVal = 0xfe;
+ static constexpr unsigned char kUserDataFill = 0xcd;
+#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
+ static constexpr size_t kGuardBlockSize = 16;
+ static constexpr size_t HeaderSize() { return sizeof(Allocation); }
+#else
+ static constexpr size_t kGuardBlockSize = 0;
+ static constexpr size_t HeaderSize() { return 0; }
+#endif
+};
+
+#if !defined(ANGLE_DISABLE_POOL_ALLOC)
+class PageHeader
+{
+ public:
+ PageHeader(PageHeader *nextPage, size_t pageCount)
+ : nextPage(nextPage),
+ pageCount(pageCount)
+# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
+ ,
+ lastAllocation(nullptr)
+# endif
+ {}
+
+ ~PageHeader()
+ {
+# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
+ if (lastAllocation)
+ {
+ lastAllocation->checkAllocList();
+ }
+# endif
+ }
+
+ PageHeader *nextPage;
+ size_t pageCount;
+# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
+ Allocation *lastAllocation;
+# endif
+};
+#endif
+
+//
+// Implement the functionality of the PoolAllocator class, which
+// is documented in PoolAlloc.h.
+//
+PoolAllocator::PoolAllocator(int growthIncrement, int allocationAlignment)
+ : mAlignment(allocationAlignment),
+#if !defined(ANGLE_DISABLE_POOL_ALLOC)
+ mPageSize(growthIncrement),
+ mFreeList(nullptr),
+ mInUseList(nullptr),
+ mNumCalls(0),
+ mTotalBytes(0),
+#endif
+ mLocked(false)
+{
+ initialize(growthIncrement, allocationAlignment);
+}
+
+void PoolAllocator::initialize(int pageSize, int alignment)
+{
+ mAlignment = alignment;
+#if !defined(ANGLE_DISABLE_POOL_ALLOC)
+ mPageSize = pageSize;
+ mPageHeaderSkip = sizeof(PageHeader);
+
+ // Alignment == 1 is a special fast-path where fastAllocate() is enabled
+ if (mAlignment != 1)
+ {
+#endif
+ // Adjust mAlignment to be at least pointer aligned and
+ // power of 2.
+ //
+ size_t minAlign = sizeof(void *);
+ if (mAlignment < minAlign)
+ {
+ mAlignment = minAlign;
+ }
+ mAlignment = gl::ceilPow2(static_cast<unsigned int>(mAlignment));
+#if !defined(ANGLE_DISABLE_POOL_ALLOC)
+ }
+ //
+ // Don't allow page sizes we know are smaller than all common
+ // OS page sizes.
+ //
+ if (mPageSize < 4 * 1024)
+ {
+ mPageSize = 4 * 1024;
+ }
+
+ //
+ // A large mCurrentPageOffset indicates a new page needs to
+ // be obtained to allocate memory.
+ //
+ mCurrentPageOffset = mPageSize;
+
+#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
+ mStack.push_back({});
+#endif
+}
+
+PoolAllocator::~PoolAllocator()
+{
+#if !defined(ANGLE_DISABLE_POOL_ALLOC)
+ while (mInUseList)
+ {
+ PageHeader *next = mInUseList->nextPage;
+ mInUseList->~PageHeader();
+ delete[] reinterpret_cast<char *>(mInUseList);
+ mInUseList = next;
+ }
+ // We should not check the guard blocks
+ // here, because we did it already when the block was
+ // placed into the free list.
+ //
+ while (mFreeList)
+ {
+ PageHeader *next = mFreeList->nextPage;
+ delete[] reinterpret_cast<char *>(mFreeList);
+ mFreeList = next;
+ }
+#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
+ for (auto &allocs : mStack)
+ {
+ for (auto alloc : allocs)
+ {
+ free(alloc);
+ }
+ }
+ mStack.clear();
+#endif
+}
+
+//
+// Check a single guard block for damage
+//
+void Allocation::checkGuardBlock(unsigned char *blockMem,
+ unsigned char val,
+ const char *locText) const
+{
+#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
+ for (size_t x = 0; x < kGuardBlockSize; x++)
+ {
+ if (blockMem[x] != val)
+ {
+ char assertMsg[80];
+ // We don't print the assert message. It's here just to be helpful.
+ snprintf(assertMsg, sizeof(assertMsg),
+ "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, mSize, data());
+ assert(0 && "PoolAlloc: Damage in guard block");
+ }
+ }
+#endif
+}
+
+void PoolAllocator::push()
+{
+#if !defined(ANGLE_DISABLE_POOL_ALLOC)
+ AllocState state = {mCurrentPageOffset, mInUseList};
+
+ mStack.push_back(state);
+
+ //
+ // Indicate there is no current page to allocate from.
+ //
+ mCurrentPageOffset = mPageSize;
+#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
+ mStack.push_back({});
+#endif
+}
+
+// Do a mass-deallocation of all the individual allocations that have occurred since the last
+// push(), or since the last pop(), or since the object's creation.
+//
+// The deallocated pages are saved for future allocations.
+void PoolAllocator::pop()
+{
+ if (mStack.size() < 1)
+ {
+ return;
+ }
+
+#if !defined(ANGLE_DISABLE_POOL_ALLOC)
+ PageHeader *page = mStack.back().page;
+ mCurrentPageOffset = mStack.back().offset;
+
+ while (mInUseList != page)
+ {
+ // invoke destructor to free allocation list
+ mInUseList->~PageHeader();
+
+ PageHeader *nextInUse = mInUseList->nextPage;
+ if (mInUseList->pageCount > 1)
+ {
+ delete[] reinterpret_cast<char *>(mInUseList);
+ }
+ else
+ {
+ mInUseList->nextPage = mFreeList;
+ mFreeList = mInUseList;
+ }
+ mInUseList = nextInUse;
+ }
+
+ mStack.pop_back();
+#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
+ for (auto &alloc : mStack.back())
+ {
+ free(alloc);
+ }
+ mStack.pop_back();
+#endif
+}
+
+//
+// Do a mass-deallocation of all the individual allocations
+// that have occurred.
+//
+void PoolAllocator::popAll()
+{
+ while (mStack.size() > 0)
+ pop();
+}
+
+void *PoolAllocator::allocate(size_t numBytes)
+{
+ ASSERT(!mLocked);
+
+#if !defined(ANGLE_DISABLE_POOL_ALLOC)
+ //
+ // Just keep some interesting statistics.
+ //
+ ++mNumCalls;
+ mTotalBytes += numBytes;
+
+ uint8_t *currentPagePtr = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;
+
+ size_t preAllocationPadding = 0;
+ size_t allocationSize =
+ Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);
+
+ // Integer overflow is unexpected.
+ ASSERT(allocationSize >= numBytes);
+
+ // Do the allocation, most likely case first, for efficiency.
+ if (allocationSize <= mPageSize - mCurrentPageOffset)
+ {
+ // There is enough room to allocate from the current page at mCurrentPageOffset.
+ uint8_t *memory = currentPagePtr + preAllocationPadding;
+ mCurrentPageOffset += allocationSize;
+
+ return initializeAllocation(memory, numBytes);
+ }
+
+ if (allocationSize > mPageSize - mPageHeaderSkip)
+ {
+ // If the allocation is larger than a whole page, do a multi-page allocation. These are not
+ // mixed with the others. The OS is efficient in allocating and freeing multiple pages.
+
+ // We don't know what the alignment of the new allocated memory will be, so conservatively
+ // allocate enough memory for up to alignment extra bytes being needed.
+ allocationSize = Allocation::AllocationSize(reinterpret_cast<uint8_t *>(mPageHeaderSkip),
+ numBytes, mAlignment, &preAllocationPadding);
+
+ size_t numBytesToAlloc = allocationSize + mPageHeaderSkip + mAlignment;
+
+ // Integer overflow is unexpected.
+ ASSERT(numBytesToAlloc >= allocationSize);
+
+ PageHeader *memory = reinterpret_cast<PageHeader *>(::new char[numBytesToAlloc]);
+ if (memory == nullptr)
+ {
+ return nullptr;
+ }
+
+ // Use placement-new to initialize header
+ new (memory) PageHeader(mInUseList, (numBytesToAlloc + mPageSize - 1) / mPageSize);
+ mInUseList = memory;
+
+ // Make next allocation come from a new page
+ mCurrentPageOffset = mPageSize;
+
+ // Now that we actually have the pointer, make sure the data pointer will be aligned.
+ currentPagePtr = reinterpret_cast<uint8_t *>(memory) + mPageHeaderSkip;
+ Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);
+
+ return initializeAllocation(currentPagePtr + preAllocationPadding, numBytes);
+ }
+
+ uint8_t *newPageAddr = allocateNewPage(numBytes);
+ return initializeAllocation(newPageAddr, numBytes);
+
+#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
+
+ void *alloc = malloc(numBytes + mAlignment - 1);
+ mStack.back().push_back(alloc);
+
+ intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
+ intAlloc = rx::roundUpPow2<intptr_t>(intAlloc, mAlignment);
+ return reinterpret_cast<void *>(intAlloc);
+#endif
+}
+
+#if !defined(ANGLE_DISABLE_POOL_ALLOC)
+uint8_t *PoolAllocator::allocateNewPage(size_t numBytes)
+{
+ // Need a simple page to allocate from. Pick a page from the free list, if any. Otherwise need
+ // to make the allocation.
+ PageHeader *memory;
+ if (mFreeList)
+ {
+ memory = mFreeList;
+ mFreeList = mFreeList->nextPage;
+ }
+ else
+ {
+ memory = reinterpret_cast<PageHeader *>(::new char[mPageSize]);
+ if (memory == nullptr)
+ {
+ return nullptr;
+ }
+ }
+ // Use placement-new to initialize header
+ new (memory) PageHeader(mInUseList, 1);
+ mInUseList = memory;
+
+ // Leave room for the page header.
+ mCurrentPageOffset = mPageHeaderSkip;
+ uint8_t *currentPagePtr = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;
+
+ size_t preAllocationPadding = 0;
+ size_t allocationSize =
+ Allocation::AllocationSize(currentPagePtr, numBytes, mAlignment, &preAllocationPadding);
+
+ mCurrentPageOffset += allocationSize;
+
+ // The new allocation is made after the page header and any alignment required before it.
+ return reinterpret_cast<uint8_t *>(mInUseList) + mPageHeaderSkip + preAllocationPadding;
+}
+
+void *PoolAllocator::initializeAllocation(uint8_t *memory, size_t numBytes)
+{
+# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS)
+ new (memory) Allocation(numBytes, memory, mInUseList->lastAllocation);
+ mInUseList->lastAllocation = reinterpret_cast<Allocation *>(memory);
+# endif
+
+ return Allocation::GetDataPointer(memory, mAlignment);
+}
+#endif
+
+void PoolAllocator::lock()
+{
+ ASSERT(!mLocked);
+ mLocked = true;
+}
+
+void PoolAllocator::unlock()
+{
+ ASSERT(mLocked);
+ mLocked = false;
+}
+
+//
+// Check all allocations in a list for damage by calling check on each.
+//
+void Allocation::checkAllocList() const
+{
+ for (const Allocation *alloc = this; alloc != nullptr; alloc = alloc->mPrevAlloc)
+ {
+ alloc->checkAlloc();
+ }
+}
+
+} // namespace angle
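
A hedged sketch of how the allocator implemented above is typically driven: mark a scope with push(), bump-allocate into it, then release everything with pop(). The Vertex payload and loop count are invented for illustration; note that the pool never runs destructors, so it only suits trivially destructible data (or objects destroyed manually).

#include <new>

#include "common/PoolAlloc.h"

struct Vertex  // illustrative payload, not an ANGLE type
{
    float position[3];
    float texCoord[2];
};

void buildScratchGeometry()
{
    angle::PoolAllocator pool;  // default page size and pointer-sized alignment

    pool.push();  // remember the current page and offset
    for (int i = 0; i < 1000; ++i)
    {
        // Usually a cheap bump of the current page offset; a new page is taken from
        // the free list (or the OS) only when the current page is full.
        void *memory = pool.allocate(sizeof(Vertex));
        if (memory == nullptr)
        {
            break;
        }
        new (memory) Vertex{};
    }
    pool.pop();  // frees everything allocated since push(); single pages are kept for reuse
}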
diff --git a/gfx/angle/checkout/src/common/PoolAlloc.h b/gfx/angle/checkout/src/common/PoolAlloc.h
new file mode 100644
index 0000000000..536848f198
--- /dev/null
+++ b/gfx/angle/checkout/src/common/PoolAlloc.h
@@ -0,0 +1,181 @@
+//
+// Copyright 2019 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// PoolAlloc.h:
+// Defines the class interface for PoolAllocator.
+//
+
+#ifndef COMMON_POOLALLOC_H_
+#define COMMON_POOLALLOC_H_
+
+#if !defined(NDEBUG)
+# define ANGLE_POOL_ALLOC_GUARD_BLOCKS // define to enable guard block checking
+#endif
+
+//
+// This header defines an allocator that can be used to efficiently
+// allocate a large number of small requests for heap memory, with the
+// intention that they are not individually deallocated, but rather
+// collectively deallocated at one time.
+//
+// This simultaneously
+//
+// * Makes each individual allocation much more efficient; the
+// typical allocation is trivial.
+// * Completely avoids the cost of doing individual deallocation.
+// * Saves the trouble of tracking down and plugging a large class of leaks.
+//
+// Individual classes can use this allocator by supplying their own
+// new and delete methods.
+//
+
+#include "angleutils.h"
+#include "common/debug.h"
+
+namespace angle
+{
+class Allocation;
+class PageHeader;
+
+//
+// There are several stacks. One tracks the user's pushes and pops, and is not
+// yet implemented. The others are simply repositories of free pages or used
+// pages.
+//
+// Page stacks are linked together with a simple header at the beginning
+// of each allocation obtained from the underlying OS. Multi-page allocations
+// are returned to the OS. Individual page allocations are kept for future
+// re-use.
+//
+// The "page size" used is not, nor must it match, the underlying OS
+// page size. But, having it be about that size or equal to a set of
+// pages is likely most optimal.
+//
+class PoolAllocator : angle::NonCopyable
+{
+ public:
+ static const int kDefaultAlignment = sizeof(void *);
+ //
+    // Create PoolAllocator. If alignment is set to 1 byte then the fastAllocate()
+    // function can be used to make allocations with less overhead.
+ //
+ PoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = kDefaultAlignment);
+
+ //
+    // Don't call the destructor just to free up the memory; call pop() instead.
+ //
+ ~PoolAllocator();
+
+ //
+ // Initialize page size and alignment after construction
+ //
+ void initialize(int pageSize, int alignment);
+
+ //
+ // Call push() to establish a new place to pop memory to. Does not
+ // have to be called to get things started.
+ //
+ void push();
+
+ //
+    // Call pop() to free all memory allocated since the last call to push(),
+    // or, if push() was never called, all memory since the first allocation.
+ //
+ void pop();
+
+ //
+ // Call popAll() to free all memory allocated.
+ //
+ void popAll();
+
+ //
+ // Call allocate() to actually acquire memory. Returns nullptr if no memory is
+ // available, otherwise a properly aligned pointer to 'numBytes' of memory.
+ //
+ void *allocate(size_t numBytes);
+
+ //
+ // Call fastAllocate() for a faster allocate function that does minimal bookkeeping.
+ // Precondition: the allocator must have been created with an alignment of 1.
+ //
+ ANGLE_INLINE uint8_t *fastAllocate(size_t numBytes)
+ {
+#if defined(ANGLE_DISABLE_POOL_ALLOC)
+ return reinterpret_cast<uint8_t *>(allocate(numBytes));
+#else
+ ASSERT(mAlignment == 1);
+ // No multi-page allocations
+ ASSERT(numBytes <= (mPageSize - mPageHeaderSkip));
+ //
+ // Do the allocation, most likely case inline first, for efficiency.
+ //
+ if (numBytes <= mPageSize - mCurrentPageOffset)
+ {
+ //
+ // Safe to allocate from mCurrentPageOffset.
+ //
+ uint8_t *memory = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset;
+ mCurrentPageOffset += numBytes;
+ return memory;
+ }
+ return allocateNewPage(numBytes);
+#endif
+ }
+
+ // There is no deallocate(). The point of this class is that the user can skip individual
+ // deallocation: everything is released at once by calling pop(), which also avoids having
+ // to track down memory leaks.
+
+ // Catch unwanted allocations.
+ // TODO(jmadill): Remove this when we remove the global allocator.
+ void lock();
+ void unlock();
+
+ private:
+ size_t mAlignment; // all returned allocations will be aligned at
+ // this granularity, which will be a power of 2
+#if !defined(ANGLE_DISABLE_POOL_ALLOC)
+ struct AllocState
+ {
+ size_t offset;
+ PageHeader *page;
+ };
+ using AllocStack = std::vector<AllocState>;
+
+ // Slow path of allocation when we have to get a new page.
+ uint8_t *allocateNewPage(size_t numBytes);
+ // Track allocations if and only if we're using guard blocks
+ void *initializeAllocation(uint8_t *memory, size_t numBytes);
+
+ // Granularity of allocation from the OS
+ size_t mPageSize;
+ // Amount of memory to skip to make room for the page header (which is the size of the page
+ // header, or PageHeader in PoolAlloc.cpp)
+ size_t mPageHeaderSkip;
+ // Next offset in top of inUseList to allocate from. This offset is not necessarily aligned to
+ // anything. When an allocation is made, the data is aligned to mAlignment, and the header (if
+ // any) will align to pointer size by extension (since mAlignment is made aligned to at least
+ // pointer size).
+ size_t mCurrentPageOffset;
+ // List of popped memory
+ PageHeader *mFreeList;
+ // List of all memory currently being used. The head of this list is where allocations are
+ // currently being made from.
+ PageHeader *mInUseList;
+ // Stack of where to allocate from, to partition pool
+ AllocStack mStack;
+
+ int mNumCalls; // just an interesting statistic
+ size_t mTotalBytes; // just an interesting statistic
+
+#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
+ std::vector<std::vector<void *>> mStack;
+#endif
+
+ bool mLocked;
+};
+
+} // namespace angle
+
+#endif // COMMON_POOLALLOC_H_
diff --git a/gfx/angle/checkout/src/common/Spinlock.h b/gfx/angle/checkout/src/common/Spinlock.h
new file mode 100644
index 0000000000..494da0943e
--- /dev/null
+++ b/gfx/angle/checkout/src/common/Spinlock.h
@@ -0,0 +1,71 @@
+//
+// Copyright 2021 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Spinlock.h:
+// Spinlock is a lock that loops actively until it gets the resource.
+// Only use it when the lock will be granted in a reasonably short time.
+
+#ifndef COMMON_SPINLOCK_H_
+#define COMMON_SPINLOCK_H_
+
+#include <atomic>
+
+// TODO(jplate) Add pause for ARM, http://anglebug.com/6067
+#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
+extern "C" void _mm_pause();
+# pragma intrinsic(_mm_pause)
+# define ANGLE_SMT_PAUSE() _mm_pause()
+#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+# define ANGLE_SMT_PAUSE() __asm__ __volatile__("pause;")
+#else
+# define ANGLE_SMT_PAUSE() static_cast<void>(0)
+#endif
+
+namespace angle
+{
+
+class Spinlock
+{
+ public:
+ Spinlock() noexcept;
+
+ bool try_lock() noexcept;
+ void lock() noexcept;
+ void unlock() noexcept;
+
+ private:
+ std::atomic_bool mLock;
+};
+
+inline Spinlock::Spinlock() noexcept : mLock(false) {}
+
+inline bool Spinlock::try_lock() noexcept
+{
+ // Relaxed check first to prevent unnecessary cache misses.
+ return !mLock.load(std::memory_order_relaxed) &&
+ !mLock.exchange(true, std::memory_order_acquire);
+}
+
+inline void Spinlock::lock() noexcept
+{
+ while (mLock.exchange(true, std::memory_order_acquire))
+ {
+ // Relaxed wait to prevent unnecessary cache misses.
+ while (mLock.load(std::memory_order_relaxed))
+ {
+ // Optimization for simultaneous multithreading.
+ ANGLE_SMT_PAUSE();
+ }
+ }
+}
+
+inline void Spinlock::unlock() noexcept
+{
+ mLock.store(false, std::memory_order_release);
+}
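Illustrative sketch (not from the ANGLE sources): Spinlock provides lock(), unlock() and try_lock(), so it satisfies the standard Lockable requirements and works with the usual RAII guards, assuming the caller includes <mutex>.

    angle::Spinlock spinlock;
    {
        std::lock_guard<angle::Spinlock> guard(spinlock);  // spins until the lock is acquired
        // ... short critical section ...
    }  // unlock() on scope exit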
+
+} // namespace angle
+
+#endif // COMMON_SPINLOCK_H_
diff --git a/gfx/angle/checkout/src/common/SynchronizedValue.h b/gfx/angle/checkout/src/common/SynchronizedValue.h
new file mode 100644
index 0000000000..95432cfbcd
--- /dev/null
+++ b/gfx/angle/checkout/src/common/SynchronizedValue.h
@@ -0,0 +1,540 @@
+//
+// Copyright 2021 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// SynchronizedValue.h:
+// A class that ensures that the correct mutex is locked when the encapsulated data is accessed.
+//   Based on boost::synchronized_value, which may become part of a future C++ standard.
+// https://www.boost.org/doc/libs/1_76_0/doc/html/thread/sds.html#thread.sds.synchronized_valuesxxx
+
+#ifndef COMMON_SYNCHRONIZEDVALUE_H_
+#define COMMON_SYNCHRONIZEDVALUE_H_
+
+#include "common/debug.h"
+
+#include <mutex>
+#include <type_traits>
+
+namespace angle
+{
+
+template <typename T, typename Lockable = std::mutex>
+class ConstStrictLockPtr
+{
+ public:
+ using value_type = T;
+ using mutex_type = Lockable;
+
+ ConstStrictLockPtr(const T &value, Lockable &mutex) : mLock(mutex), mValue(value) {}
+ ConstStrictLockPtr(const T &value, Lockable &mutex, std::adopt_lock_t) noexcept
+ : mLock(mutex, std::adopt_lock), mValue(value)
+ {}
+
+ ConstStrictLockPtr(ConstStrictLockPtr &&other) noexcept
+ : mLock(std::move(other.mLock)), mValue(other.mValue)
+ {}
+
+ ConstStrictLockPtr(const ConstStrictLockPtr &) = delete;
+ ConstStrictLockPtr &operator=(const ConstStrictLockPtr &) = delete;
+
+ ~ConstStrictLockPtr() = default;
+
+ const T *operator->() const { return &mValue; }
+ const T &operator*() const { return mValue; }
+
+ protected:
+ std::unique_lock<Lockable> mLock;
+ T const &mValue;
+};
+
+template <typename T, typename Lockable = std::mutex>
+class StrictLockPtr : public ConstStrictLockPtr<T, Lockable>
+{
+ private:
+ using BaseType = ConstStrictLockPtr<T, Lockable>;
+
+ public:
+ StrictLockPtr(T &value, Lockable &mutex) : BaseType(value, mutex) {}
+ StrictLockPtr(T &value, Lockable &mutex, std::adopt_lock_t) noexcept
+ : BaseType(value, mutex, std::adopt_lock)
+ {}
+
+ StrictLockPtr(StrictLockPtr &&other) noexcept
+ : BaseType(std::move(static_cast<BaseType &&>(other)))
+ {}
+
+ StrictLockPtr(const StrictLockPtr &) = delete;
+ StrictLockPtr &operator=(const StrictLockPtr &) = delete;
+
+ ~StrictLockPtr() = default;
+
+ T *operator->() { return const_cast<T *>(&this->mValue); }
+ T &operator*() { return const_cast<T &>(this->mValue); }
+};
+
+template <typename SV>
+struct SynchronizedValueStrictLockPtr
+{
+ using type = StrictLockPtr<typename SV::value_type, typename SV::mutex_type>;
+};
+
+template <typename SV>
+struct SynchronizedValueStrictLockPtr<const SV>
+{
+ using type = ConstStrictLockPtr<typename SV::value_type, typename SV::mutex_type>;
+};
+
+template <typename T, typename Lockable = std::mutex>
+class ConstUniqueLockPtr : public std::unique_lock<Lockable>
+{
+ private:
+ using BaseType = std::unique_lock<Lockable>;
+
+ public:
+ using value_type = T;
+ using mutex_type = Lockable;
+
+ ConstUniqueLockPtr(const T &value, Lockable &mutex) : BaseType(mutex), mValue(value) {}
+ ConstUniqueLockPtr(const T &value, Lockable &mutex, std::adopt_lock_t) noexcept
+ : BaseType(mutex, std::adopt_lock), mValue(value)
+ {}
+ ConstUniqueLockPtr(const T &value, Lockable &mutex, std::defer_lock_t) noexcept
+ : BaseType(mutex, std::defer_lock), mValue(value)
+ {}
+ ConstUniqueLockPtr(const T &value, Lockable &mutex, std::try_to_lock_t) noexcept
+ : BaseType(mutex, std::try_to_lock), mValue(value)
+ {}
+
+ ConstUniqueLockPtr(ConstUniqueLockPtr &&other) noexcept
+ : BaseType(std::move(static_cast<BaseType &&>(other))), mValue(other.mValue)
+ {}
+
+ ConstUniqueLockPtr(const ConstUniqueLockPtr &) = delete;
+ ConstUniqueLockPtr &operator=(const ConstUniqueLockPtr &) = delete;
+
+ ~ConstUniqueLockPtr() = default;
+
+ const T *operator->() const
+ {
+ ASSERT(this->owns_lock());
+ return &mValue;
+ }
+ const T &operator*() const
+ {
+ ASSERT(this->owns_lock());
+ return mValue;
+ }
+
+ protected:
+ T const &mValue;
+};
+
+template <typename T, typename Lockable = std::mutex>
+class UniqueLockPtr : public ConstUniqueLockPtr<T, Lockable>
+{
+ private:
+ using BaseType = ConstUniqueLockPtr<T, Lockable>;
+
+ public:
+ UniqueLockPtr(T &value, Lockable &mutex) : BaseType(value, mutex) {}
+ UniqueLockPtr(T &value, Lockable &mutex, std::adopt_lock_t) noexcept
+ : BaseType(value, mutex, std::adopt_lock)
+ {}
+ UniqueLockPtr(T &value, Lockable &mutex, std::defer_lock_t) noexcept
+ : BaseType(value, mutex, std::defer_lock)
+ {}
+ UniqueLockPtr(T &value, Lockable &mutex, std::try_to_lock_t) noexcept
+ : BaseType(value, mutex, std::try_to_lock)
+ {}
+
+ UniqueLockPtr(UniqueLockPtr &&other) noexcept
+ : BaseType(std::move(static_cast<BaseType &&>(other)))
+ {}
+
+ UniqueLockPtr(const UniqueLockPtr &) = delete;
+ UniqueLockPtr &operator=(const UniqueLockPtr &) = delete;
+
+ ~UniqueLockPtr() = default;
+
+ T *operator->()
+ {
+ ASSERT(this->owns_lock());
+ return const_cast<T *>(&this->mValue);
+ }
+ T &operator*()
+ {
+ ASSERT(this->owns_lock());
+ return const_cast<T &>(this->mValue);
+ }
+};
+
+template <typename SV>
+struct SynchronizedValueUniqueLockPtr
+{
+ using type = UniqueLockPtr<typename SV::value_type, typename SV::mutex_type>;
+};
+
+template <typename SV>
+struct SynchronizedValueUniqueLockPtr<const SV>
+{
+ using type = ConstUniqueLockPtr<typename SV::value_type, typename SV::mutex_type>;
+};
+
+template <typename T, typename Lockable = std::mutex>
+class SynchronizedValue
+{
+ public:
+ using value_type = T;
+ using mutex_type = Lockable;
+
+ SynchronizedValue() noexcept(std::is_nothrow_default_constructible<T>::value) : mValue() {}
+
+ SynchronizedValue(const T &other) noexcept(std::is_nothrow_copy_constructible<T>::value)
+ : mValue(other)
+ {}
+
+ SynchronizedValue(T &&other) noexcept(std::is_nothrow_move_constructible<T>::value)
+ : mValue(std::move(other))
+ {}
+
+ template <typename... Args>
+ SynchronizedValue(Args &&... args) noexcept(noexcept(T(std::forward<Args>(args)...)))
+ : mValue(std::forward<Args>(args)...)
+ {}
+
+ SynchronizedValue(const SynchronizedValue &other)
+ {
+ std::lock_guard<Lockable> lock(other.mMutex);
+ mValue = other.mValue;
+ }
+
+ SynchronizedValue(SynchronizedValue &&other)
+ {
+ std::lock_guard<Lockable> lock(other.mMutex);
+ mValue = std::move(other.mValue);
+ }
+
+ SynchronizedValue &operator=(const SynchronizedValue &other)
+ {
+ if (&other != this)
+ {
+ std::unique_lock<Lockable> lock1(mMutex, std::defer_lock);
+ std::unique_lock<Lockable> lock2(other.mMutex, std::defer_lock);
+ std::lock(lock1, lock2);
+ mValue = other.mValue;
+ }
+ return *this;
+ }
+
+ SynchronizedValue &operator=(SynchronizedValue &&other)
+ {
+ if (&other != this)
+ {
+ std::unique_lock<Lockable> lock1(mMutex, std::defer_lock);
+ std::unique_lock<Lockable> lock2(other.mMutex, std::defer_lock);
+ std::lock(lock1, lock2);
+ mValue = std::move(other.mValue);
+ }
+ return *this;
+ }
+
+ SynchronizedValue &operator=(const T &value)
+ {
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ mValue = value;
+ }
+ return *this;
+ }
+
+ SynchronizedValue &operator=(T &&value)
+ {
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ mValue = std::move(value);
+ }
+ return *this;
+ }
+
+ T get() const
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ return mValue;
+ }
+
+ explicit operator T() const { return get(); }
+
+ void swap(SynchronizedValue &other)
+ {
+ if (this == &other)
+ {
+ return;
+ }
+ std::unique_lock<Lockable> lock1(mMutex, std::defer_lock);
+ std::unique_lock<Lockable> lock2(other.mMutex, std::defer_lock);
+ std::lock(lock1, lock2);
+ std::swap(mValue, other.mValue);
+ }
+
+ void swap(T &other)
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ std::swap(mValue, other);
+ }
+
+ StrictLockPtr<T, Lockable> operator->() { return StrictLockPtr<T, Lockable>(mValue, mMutex); }
+ ConstStrictLockPtr<T, Lockable> operator->() const
+ {
+ return ConstStrictLockPtr<T, Lockable>(mValue, mMutex);
+ }
+
+ StrictLockPtr<T, Lockable> synchronize() { return StrictLockPtr<T, Lockable>(mValue, mMutex); }
+ ConstStrictLockPtr<T, Lockable> synchronize() const
+ {
+ return ConstStrictLockPtr<T, Lockable>(mValue, mMutex);
+ }
+
+ UniqueLockPtr<T, Lockable> unique_synchronize()
+ {
+ return UniqueLockPtr<T, Lockable>(mValue, mMutex);
+ }
+ ConstUniqueLockPtr<T, Lockable> unique_synchronize() const
+ {
+ return ConstUniqueLockPtr<T, Lockable>(mValue, mMutex);
+ }
+
+ UniqueLockPtr<T, Lockable> defer_synchronize() noexcept
+ {
+ return UniqueLockPtr<T, Lockable>(mValue, mMutex, std::defer_lock);
+ }
+ ConstUniqueLockPtr<T, Lockable> defer_synchronize() const noexcept
+ {
+ return ConstUniqueLockPtr<T, Lockable>(mValue, mMutex, std::defer_lock);
+ }
+
+ UniqueLockPtr<T, Lockable> try_to_synchronize() noexcept
+ {
+ return UniqueLockPtr<T, Lockable>(mValue, mMutex, std::try_to_lock);
+ }
+ ConstUniqueLockPtr<T, Lockable> try_to_synchronize() const noexcept
+ {
+ return ConstUniqueLockPtr<T, Lockable>(mValue, mMutex, std::try_to_lock);
+ }
+
+ UniqueLockPtr<T, Lockable> adopt_synchronize() noexcept
+ {
+ return UniqueLockPtr<T, Lockable>(mValue, mMutex, std::adopt_lock);
+ }
+ ConstUniqueLockPtr<T, Lockable> adopt_synchronize() const noexcept
+ {
+ return ConstUniqueLockPtr<T, Lockable>(mValue, mMutex, std::adopt_lock);
+ }
+
+ class DerefValue
+ {
+ public:
+ DerefValue(DerefValue &&other) : mLock(std::move(other.mLock)), mValue(other.mValue) {}
+
+ DerefValue(const DerefValue &) = delete;
+ DerefValue &operator=(const DerefValue &) = delete;
+
+ operator T &() { return mValue; }
+
+ DerefValue &operator=(const T &other)
+ {
+ mValue = other;
+ return *this;
+ }
+
+ private:
+ explicit DerefValue(SynchronizedValue &outer) : mLock(outer.mMutex), mValue(outer.mValue) {}
+
+ std::unique_lock<Lockable> mLock;
+ T &mValue;
+
+ friend class SynchronizedValue;
+ };
+
+ class ConstDerefValue
+ {
+ public:
+ ConstDerefValue(ConstDerefValue &&other)
+ : mLock(std::move(other.mLock)), mValue(other.mValue)
+ {}
+
+ ConstDerefValue(const ConstDerefValue &) = delete;
+ ConstDerefValue &operator=(const ConstDerefValue &) = delete;
+
+ operator const T &() { return mValue; }
+
+ private:
+ explicit ConstDerefValue(const SynchronizedValue &outer)
+ : mLock(outer.mMutex), mValue(outer.mValue)
+ {}
+
+ std::unique_lock<Lockable> mLock;
+ const T &mValue;
+
+ friend class SynchronizedValue;
+ };
+
+ DerefValue operator*() { return DerefValue(*this); }
+ ConstDerefValue operator*() const { return ConstDerefValue(*this); }
+
+ template <typename OStream>
+ void save(OStream &os) const
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ os << mValue;
+ }
+
+ template <typename IStream>
+ void load(IStream &is)
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ is >> mValue;
+ }
+
+ bool operator==(const SynchronizedValue &other) const
+ {
+ std::unique_lock<Lockable> lock1(mMutex, std::defer_lock);
+ std::unique_lock<Lockable> lock2(other.mMutex, std::defer_lock);
+ std::lock(lock1, lock2);
+ return mValue == other.mValue;
+ }
+
+ bool operator!=(const SynchronizedValue &other) const
+ {
+ std::unique_lock<Lockable> lock1(mMutex, std::defer_lock);
+ std::unique_lock<Lockable> lock2(other.mMutex, std::defer_lock);
+ std::lock(lock1, lock2);
+ return mValue != other.mValue;
+ }
+
+ bool operator<(const SynchronizedValue &other) const
+ {
+ std::unique_lock<Lockable> lock1(mMutex, std::defer_lock);
+ std::unique_lock<Lockable> lock2(other.mMutex, std::defer_lock);
+ std::lock(lock1, lock2);
+ return mValue < other.mValue;
+ }
+
+ bool operator>(const SynchronizedValue &other) const
+ {
+ std::unique_lock<Lockable> lock1(mMutex, std::defer_lock);
+ std::unique_lock<Lockable> lock2(other.mMutex, std::defer_lock);
+ std::lock(lock1, lock2);
+ return mValue > other.mValue;
+ }
+
+ bool operator<=(const SynchronizedValue &other) const
+ {
+ std::unique_lock<Lockable> lock1(mMutex, std::defer_lock);
+ std::unique_lock<Lockable> lock2(other.mMutex, std::defer_lock);
+ std::lock(lock1, lock2);
+ return mValue <= other.mValue;
+ }
+
+ bool operator>=(const SynchronizedValue &other) const
+ {
+ std::unique_lock<Lockable> lock1(mMutex, std::defer_lock);
+ std::unique_lock<Lockable> lock2(other.mMutex, std::defer_lock);
+ std::lock(lock1, lock2);
+ return mValue >= other.mValue;
+ }
+
+ bool operator==(const T &other) const
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ return mValue == other;
+ }
+
+ bool operator!=(const T &other) const
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ return mValue != other;
+ }
+
+ bool operator<(const T &other) const
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ return mValue < other;
+ }
+
+ bool operator>(const T &other) const
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ return mValue > other;
+ }
+
+ bool operator<=(const T &other) const
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ return mValue <= other;
+ }
+
+ bool operator>=(const T &other) const
+ {
+ std::lock_guard<Lockable> lock(mMutex);
+ return mValue >= other;
+ }
+
+ private:
+ T mValue;
+ mutable Lockable mMutex;
+};
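Usage sketch (not from the ANGLE sources; assumes <vector> is included) of the accessors defined above.

    angle::SynchronizedValue<std::vector<int>> values;
    values->push_back(1);                      // operator-> locks the mutex for this single call
    {
        auto locked = values.synchronize();    // StrictLockPtr keeps the mutex locked for the scope
        locked->push_back(2);
        locked->push_back(3);
    }
    std::vector<int> snapshot = values.get();  // copies the value while holding the lock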
+
+template <typename OStream, typename T, typename L>
+inline OStream &operator<<(OStream &os, SynchronizedValue<T, L> const &sv)
+{
+ sv.save(os);
+ return os;
+}
+
+template <typename IStream, typename T, typename L>
+inline IStream &operator>>(IStream &is, SynchronizedValue<T, L> &sv)
+{
+ sv.load(is);
+ return is;
+}
+
+template <typename T, typename L>
+bool operator==(const T &lhs, const SynchronizedValue<T, L> &rhs)
+{
+ return rhs == lhs;
+}
+
+template <typename T, typename L>
+bool operator!=(const T &lhs, const SynchronizedValue<T, L> &rhs)
+{
+ return rhs != lhs;
+}
+
+template <typename T, typename L>
+bool operator<(const T &lhs, const SynchronizedValue<T, L> &rhs)
+{
+ return rhs < lhs;
+}
+
+template <typename T, typename L>
+bool operator>(const T &lhs, const SynchronizedValue<T, L> &rhs)
+{
+ return rhs > lhs;
+}
+
+template <typename T, typename L>
+bool operator<=(const T &lhs, const SynchronizedValue<T, L> &rhs)
+{
+ return rhs <= lhs;
+}
+
+template <typename T, typename L>
+bool operator>=(const T &lhs, const SynchronizedValue<T, L> &rhs)
+{
+ return rhs >= lhs;
+}
+
+} // namespace angle
+
+#endif // COMMON_SYNCHRONIZEDVALUE_H_
diff --git a/gfx/angle/checkout/src/common/aligned_memory.cpp b/gfx/angle/checkout/src/common/aligned_memory.cpp
new file mode 100644
index 0000000000..9798fc0f42
--- /dev/null
+++ b/gfx/angle/checkout/src/common/aligned_memory.cpp
@@ -0,0 +1,64 @@
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// aligned_memory: An aligned memory allocator. Based on Chrome's base/memory/aligned_memory.
+//
+
+#include "common/aligned_memory.h"
+
+#include "common/debug.h"
+#include "common/platform.h"
+
+#if defined(COMPILER_MSVC)
+# include <malloc.h>
+#else
+# include <stdlib.h>
+#endif
+
+namespace angle
+{
+
+void *AlignedAlloc(size_t size, size_t alignment)
+{
+ ASSERT(size > 0);
+ ASSERT((alignment & (alignment - 1)) == 0);
+ ASSERT((alignment % sizeof(void *)) == 0);
+ void *ptr = nullptr;
+#if defined(ANGLE_PLATFORM_WINDOWS)
+ ptr = _aligned_malloc(size, alignment);
+// Android technically supports posix_memalign(), but does not expose it in
+// the current version of the library headers used by Chrome. Luckily,
+// memalign() on Android returns pointers which can safely be used with
+// free(), so we can use it instead. Issue filed to document this:
+// http://code.google.com/p/android/issues/detail?id=35391
+#elif defined(ANGLE_PLATFORM_ANDROID)
+ ptr = memalign(alignment, size);
+#else
+ if (posix_memalign(&ptr, alignment, size))
+ ptr = nullptr;
+#endif
+ // Since aligned allocations may fail for non-memory related reasons, force a
+ // crash if we encounter a failed allocation.
+ if (!ptr)
+ {
+ ERR() << "If you crashed here, your aligned allocation is incorrect: "
+ << "size=" << size << ", alignment=" << alignment;
+ ASSERT(false);
+ }
+ // Confidence check alignment just to be safe.
+ ASSERT((reinterpret_cast<uintptr_t>(ptr) & (alignment - 1)) == 0);
+ return ptr;
+}
+
+void AlignedFree(void *ptr)
+{
+#if defined(ANGLE_PLATFORM_WINDOWS)
+ _aligned_free(ptr);
+#else
+ free(ptr);
+#endif
+}
+
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/aligned_memory.h b/gfx/angle/checkout/src/common/aligned_memory.h
new file mode 100644
index 0000000000..dcbb60d1cb
--- /dev/null
+++ b/gfx/angle/checkout/src/common/aligned_memory.h
@@ -0,0 +1,23 @@
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// aligned_memory: An aligned memory allocator. Based on Chrome's base/memory/aligned_memory.
+//
+
+#ifndef COMMON_ALIGNED_MEMORY_H_
+#define COMMON_ALIGNED_MEMORY_H_
+
+#include <cstddef>
+
+namespace angle
+{
+
+// This can be replaced with std::aligned_alloc when we have C++17.
+void *AlignedAlloc(size_t size, size_t alignment);
+void AlignedFree(void *ptr);
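A brief usage sketch (illustrative only) of the pair declared above.

    // The alignment must be a power of two and a multiple of sizeof(void *).
    void *block = angle::AlignedAlloc(1024, 64);
    // ... use the 64-byte-aligned block ...
    angle::AlignedFree(block);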
+
+} // namespace angle
+
+#endif // COMMON_ALIGNED_MEMORY_H_
diff --git a/gfx/angle/checkout/src/common/android_util.cpp b/gfx/angle/checkout/src/common/android_util.cpp
new file mode 100644
index 0000000000..8188da21ef
--- /dev/null
+++ b/gfx/angle/checkout/src/common/android_util.cpp
@@ -0,0 +1,424 @@
+//
+// Copyright 2018 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// android_util.cpp: Utilities for using the Android platform
+
+#include "common/android_util.h"
+#include "common/debug.h"
+
+#include <cstdint>
+
+#if defined(ANGLE_PLATFORM_ANDROID) && __ANDROID_API__ >= 26
+# define ANGLE_AHARDWARE_BUFFER_SUPPORT
+// NDK header file for access to Android Hardware Buffers
+# include <android/hardware_buffer.h>
+#endif
+
+// Taken from cutils/native_handle.h:
+// https://android.googlesource.com/platform/system/core/+/master/libcutils/include/cutils/native_handle.h
+typedef struct native_handle
+{
+ int version; /* sizeof(native_handle_t) */
+ int numFds; /* number of file-descriptors at &data[0] */
+ int numInts; /* number of ints at &data[numFds] */
+#if defined(__clang__)
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wzero-length-array"
+#elif defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable : 4200)
+#endif
+ int data[0]; /* numFds + numInts ints */
+#if defined(__clang__)
+# pragma clang diagnostic pop
+#elif defined(_MSC_VER)
+# pragma warning(pop)
+#endif
+} native_handle_t;
+
+// Taken from nativebase/nativebase.h
+// https://android.googlesource.com/platform/frameworks/native/+/master/libs/nativebase/include/nativebase/nativebase.h
+typedef const native_handle_t *buffer_handle_t;
+
+typedef struct android_native_base_t
+{
+ /* a magic value defined by the actual EGL native type */
+ int magic;
+ /* the sizeof() of the actual EGL native type */
+ int version;
+ void *reserved[4];
+ /* reference-counting interface */
+ void (*incRef)(struct android_native_base_t *base);
+ void (*decRef)(struct android_native_base_t *base);
+} android_native_base_t;
+
+typedef struct ANativeWindowBuffer
+{
+ struct android_native_base_t common;
+ int width;
+ int height;
+ int stride;
+ int format;
+ int usage_deprecated;
+ uintptr_t layerCount;
+ void *reserved[1];
+ const native_handle_t *handle;
+ uint64_t usage;
+ // We needed extra space for storing the 64-bit usage flags;
+ // the number of slots to use from reserved_proc depends on the
+ // architecture.
+ void *reserved_proc[8 - (sizeof(uint64_t) / sizeof(void *))];
+} ANativeWindowBuffer_t;
+
+// Taken from android/hardware_buffer.h
+// https://android.googlesource.com/platform/frameworks/native/+/master/libs/nativewindow/include/android/hardware_buffer.h
+
+// The AHARDWAREBUFFER_FORMAT_B8G8R8A8_UNORM, AHARDWAREBUFFER_FORMAT_B4G4R4A4_UNORM, and
+// AHARDWAREBUFFER_FORMAT_B5G5R5A1_UNORM formats were deprecated and re-added explicitly.
+
+// clang-format off
+/**
+ * Buffer pixel formats.
+ */
+enum {
+
+#ifndef ANGLE_AHARDWARE_BUFFER_SUPPORT
+ /**
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_R8G8B8A8_UNORM
+ * OpenGL ES: GL_RGBA8
+ */
+ AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM = 1,
+
+ /**
+ * 32 bits per pixel, 8 bits per channel format where alpha values are
+ * ignored (always opaque).
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_R8G8B8A8_UNORM
+ * OpenGL ES: GL_RGB8
+ */
+ AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM = 2,
+
+ /**
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_R8G8B8_UNORM
+ * OpenGL ES: GL_RGB8
+ */
+ AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM = 3,
+
+ /**
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_R5G6B5_UNORM_PACK16
+ * OpenGL ES: GL_RGB565
+ */
+ AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM = 4,
+#endif // ANGLE_AHARDWARE_BUFFER_SUPPORT
+
+ AHARDWAREBUFFER_FORMAT_B8G8R8A8_UNORM = 5,
+ AHARDWAREBUFFER_FORMAT_B5G5R5A1_UNORM = 6,
+ AHARDWAREBUFFER_FORMAT_B4G4R4A4_UNORM = 7,
+
+#ifndef ANGLE_AHARDWARE_BUFFER_SUPPORT
+ /**
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_R16G16B16A16_SFLOAT
+ * OpenGL ES: GL_RGBA16F
+ */
+ AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT = 0x16,
+
+ /**
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_A2B10G10R10_UNORM_PACK32
+ * OpenGL ES: GL_RGB10_A2
+ */
+ AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM = 0x2b,
+
+ /**
+ * An opaque binary blob format that must have height 1, with width equal to
+ * the buffer size in bytes.
+ */
+ AHARDWAREBUFFER_FORMAT_BLOB = 0x21,
+
+ /**
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_D16_UNORM
+ * OpenGL ES: GL_DEPTH_COMPONENT16
+ */
+ AHARDWAREBUFFER_FORMAT_D16_UNORM = 0x30,
+
+ /**
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_X8_D24_UNORM_PACK32
+ * OpenGL ES: GL_DEPTH_COMPONENT24
+ */
+ AHARDWAREBUFFER_FORMAT_D24_UNORM = 0x31,
+
+ /**
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_D24_UNORM_S8_UINT
+ * OpenGL ES: GL_DEPTH24_STENCIL8
+ */
+ AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT = 0x32,
+
+ /**
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_D32_SFLOAT
+ * OpenGL ES: GL_DEPTH_COMPONENT32F
+ */
+ AHARDWAREBUFFER_FORMAT_D32_FLOAT = 0x33,
+
+ /**
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_D32_SFLOAT_S8_UINT
+ * OpenGL ES: GL_DEPTH32F_STENCIL8
+ */
+ AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT = 0x34,
+
+ /**
+ * Corresponding formats:
+ * Vulkan: VK_FORMAT_S8_UINT
+ * OpenGL ES: GL_STENCIL_INDEX8
+ */
+ AHARDWAREBUFFER_FORMAT_S8_UINT = 0x35,
+
+ /**
+ * YUV 420 888 format.
+ * Must have an even width and height. Can be accessed in OpenGL
+ * shaders through an external sampler. Does not support mip-maps
+ * shaders through an external sampler. Does not support mip-maps,
+ * cube-maps, or multi-layered textures.
+ AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420 = 0x23,
+
+#endif // ANGLE_AHARDWARE_BUFFER_SUPPORT
+
+ AHARDWAREBUFFER_FORMAT_YV12 = 0x32315659,
+ AHARDWAREBUFFER_FORMAT_IMPLEMENTATION_DEFINED = 0x22,
+};
+// clang-format on
+
+namespace
+{
+
+// In the Android system:
+// - AHardwareBuffer is essentially a typedef of GraphicBuffer. Conversion functions simply
+// reinterpret_cast.
+// - GraphicBuffer inherits from two base classes, ANativeWindowBuffer and RefBase.
+//
+// GraphicBuffer implements a getter for ANativeWindowBuffer (getNativeBuffer) by static_casting
+// itself to its base class ANativeWindowBuffer. The offset of the ANativeWindowBuffer pointer
+// from the GraphicBuffer pointer is 16 bytes. This is likely due to two pointers: the vtable
+// pointer of GraphicBuffer and the single pointer member of the RefBase class.
+//
+// This is not future proof at all. We need to look into getting utilities added to Android to
+// perform this cast for us.
+constexpr int kAHardwareBufferToANativeWindowBufferOffset = static_cast<int>(sizeof(void *)) * 2;
+
+template <typename T1, typename T2>
+T1 *OffsetPointer(T2 *ptr, int bytes)
+{
+ return reinterpret_cast<T1 *>(reinterpret_cast<intptr_t>(ptr) + bytes);
+}
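To make the conversions below concrete (editor's illustration, based on the comment above): on a typical 64-bit build the offset constant is 16, so the two views of the same object are related by plain pointer arithmetic.

    // p + 0   AHardwareBuffer / GraphicBuffer (vtable pointer, RefBase pointer member)
    // p + 16  ANativeWindowBuffer base object, which is what EGLClientBuffer points to
    //
    // ANativeWindowBufferToAHardwareBuffer() below is therefore
    // OffsetPointer<AHardwareBuffer>(windowBuffer, -16) on such a build.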
+
+GLenum GetPixelFormatInfo(int pixelFormat, bool *isYUV)
+{
+ *isYUV = false;
+ switch (pixelFormat)
+ {
+ case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM:
+ return GL_RGBA8;
+ case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM:
+ return GL_RGB8;
+ case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM:
+ return GL_RGB8;
+ case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM:
+ return GL_RGB565;
+ case AHARDWAREBUFFER_FORMAT_B8G8R8A8_UNORM:
+ return GL_BGRA8_EXT;
+ case AHARDWAREBUFFER_FORMAT_B5G5R5A1_UNORM:
+ return GL_RGB5_A1;
+ case AHARDWAREBUFFER_FORMAT_B4G4R4A4_UNORM:
+ return GL_RGBA4;
+ case AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT:
+ return GL_RGBA16F;
+ case AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM:
+ return GL_RGB10_A2;
+ case AHARDWAREBUFFER_FORMAT_BLOB:
+ return GL_NONE;
+ case AHARDWAREBUFFER_FORMAT_D16_UNORM:
+ return GL_DEPTH_COMPONENT16;
+ case AHARDWAREBUFFER_FORMAT_D24_UNORM:
+ return GL_DEPTH_COMPONENT24;
+ case AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT:
+ return GL_DEPTH24_STENCIL8;
+ case AHARDWAREBUFFER_FORMAT_D32_FLOAT:
+ return GL_DEPTH_COMPONENT32F;
+ case AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT:
+ return GL_DEPTH32F_STENCIL8;
+ case AHARDWAREBUFFER_FORMAT_S8_UINT:
+ return GL_STENCIL_INDEX8;
+ case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
+ case AHARDWAREBUFFER_FORMAT_YV12:
+ case AHARDWAREBUFFER_FORMAT_IMPLEMENTATION_DEFINED:
+ *isYUV = true;
+ return GL_RGB8;
+ default:
+ // Treat unknown formats as RGB. They are vendor-specific YUV formats that would sample
+ // as RGB.
+ *isYUV = true;
+ return GL_RGB8;
+ }
+}
+
+} // anonymous namespace
+
+namespace angle
+{
+
+namespace android
+{
+
+ANativeWindowBuffer *ClientBufferToANativeWindowBuffer(EGLClientBuffer clientBuffer)
+{
+ return reinterpret_cast<ANativeWindowBuffer *>(clientBuffer);
+}
+
+uint64_t GetAHBUsage(int eglNativeBufferUsage)
+{
+ uint64_t ahbUsage = 0;
+#if defined(ANGLE_AHARDWARE_BUFFER_SUPPORT)
+ if (eglNativeBufferUsage & EGL_NATIVE_BUFFER_USAGE_PROTECTED_BIT_ANDROID)
+ {
+ ahbUsage |= AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
+ }
+ if (eglNativeBufferUsage & EGL_NATIVE_BUFFER_USAGE_RENDERBUFFER_BIT_ANDROID)
+ {
+ ahbUsage |= AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER;
+ }
+ if (eglNativeBufferUsage & EGL_NATIVE_BUFFER_USAGE_TEXTURE_BIT_ANDROID)
+ {
+ ahbUsage |= AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
+ }
+#endif // ANGLE_AHARDWARE_BUFFER_SUPPORT
+ return ahbUsage;
+}
+
+EGLClientBuffer CreateEGLClientBufferFromAHardwareBuffer(int width,
+ int height,
+ int depth,
+ int androidFormat,
+ int usage)
+{
+#if defined(ANGLE_AHARDWARE_BUFFER_SUPPORT)
+
+ // The width and height are in pixels of the given format
+ AHardwareBuffer_Desc aHardwareBufferDescription = {};
+ aHardwareBufferDescription.width = static_cast<uint32_t>(width);
+ aHardwareBufferDescription.height = static_cast<uint32_t>(height);
+ aHardwareBufferDescription.layers = static_cast<uint32_t>(depth);
+ aHardwareBufferDescription.format = androidFormat;
+ aHardwareBufferDescription.usage = GetAHBUsage(usage);
+
+ // Allocate memory from Android Hardware Buffer
+ AHardwareBuffer *aHardwareBuffer = nullptr;
+ int res = AHardwareBuffer_allocate(&aHardwareBufferDescription, &aHardwareBuffer);
+ if (res != 0)
+ {
+ return nullptr;
+ }
+
+ return AHardwareBufferToClientBuffer(aHardwareBuffer);
+#else
+ return nullptr;
+#endif // ANGLE_AHARDWARE_BUFFER_SUPPORT
+}
+
+void GetANativeWindowBufferProperties(const ANativeWindowBuffer *buffer,
+ int *width,
+ int *height,
+ int *depth,
+ int *pixelFormat,
+ uint64_t *usage)
+{
+ *width = buffer->width;
+ *height = buffer->height;
+ *depth = static_cast<int>(buffer->layerCount);
+ *pixelFormat = buffer->format;
+ *usage = buffer->usage;
+}
+
+GLenum NativePixelFormatToGLInternalFormat(int pixelFormat)
+{
+ bool isYuv = false;
+ return GetPixelFormatInfo(pixelFormat, &isYuv);
+}
+
+int GLInternalFormatToNativePixelFormat(GLenum internalFormat)
+{
+ switch (internalFormat)
+ {
+ case GL_RGBA8:
+ return AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
+ case GL_RGB8:
+ return AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM;
+ case GL_RGB565:
+ return AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM;
+ case GL_BGRA8_EXT:
+ return AHARDWAREBUFFER_FORMAT_B8G8R8A8_UNORM;
+ case GL_RGB5_A1:
+ return AHARDWAREBUFFER_FORMAT_B5G5R5A1_UNORM;
+ case GL_RGBA4:
+ return AHARDWAREBUFFER_FORMAT_B4G4R4A4_UNORM;
+ case GL_RGBA16F:
+ return AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT;
+ case GL_RGB10_A2:
+ return AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM;
+ case GL_NONE:
+ return AHARDWAREBUFFER_FORMAT_BLOB;
+ case GL_DEPTH_COMPONENT16:
+ return AHARDWAREBUFFER_FORMAT_D16_UNORM;
+ case GL_DEPTH_COMPONENT24:
+ return AHARDWAREBUFFER_FORMAT_D24_UNORM;
+ case GL_DEPTH24_STENCIL8:
+ return AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT;
+ case GL_DEPTH_COMPONENT32F:
+ return AHARDWAREBUFFER_FORMAT_D32_FLOAT;
+ case GL_DEPTH32F_STENCIL8:
+ return AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT;
+ case GL_STENCIL_INDEX8:
+ return AHARDWAREBUFFER_FORMAT_S8_UINT;
+ default:
+ WARN() << "Unknown internalFormat: " << internalFormat << ". Treating as 0";
+ return 0;
+ }
+}
+
+bool NativePixelFormatIsYUV(int pixelFormat)
+{
+ bool isYuv = false;
+ GetPixelFormatInfo(pixelFormat, &isYuv);
+ return isYuv;
+}
+
+AHardwareBuffer *ANativeWindowBufferToAHardwareBuffer(ANativeWindowBuffer *windowBuffer)
+{
+ return OffsetPointer<AHardwareBuffer>(windowBuffer,
+ -kAHardwareBufferToANativeWindowBufferOffset);
+}
+
+EGLClientBuffer AHardwareBufferToClientBuffer(const AHardwareBuffer *hardwareBuffer)
+{
+ return OffsetPointer<EGLClientBuffer>(hardwareBuffer,
+ kAHardwareBufferToANativeWindowBufferOffset);
+}
+
+AHardwareBuffer *ClientBufferToAHardwareBuffer(EGLClientBuffer clientBuffer)
+{
+ return OffsetPointer<AHardwareBuffer>(clientBuffer,
+ -kAHardwareBufferToANativeWindowBufferOffset);
+}
+} // namespace android
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/android_util.h b/gfx/angle/checkout/src/common/android_util.h
new file mode 100644
index 0000000000..eee60ba244
--- /dev/null
+++ b/gfx/angle/checkout/src/common/android_util.h
@@ -0,0 +1,59 @@
+//
+// Copyright 2018 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// android_util.h: Utilities for using the Android platform
+
+#ifndef COMMON_ANDROIDUTIL_H_
+#define COMMON_ANDROIDUTIL_H_
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+
+#include <stdint.h>
+#include <array>
+
+#include "angle_gl.h"
+
+struct ANativeWindowBuffer;
+struct AHardwareBuffer;
+
+namespace angle
+{
+
+namespace android
+{
+
+constexpr std::array<GLenum, 3> kSupportedSizedInternalFormats = {GL_RGBA8, GL_RGB8, GL_RGB565};
+
+ANativeWindowBuffer *ClientBufferToANativeWindowBuffer(EGLClientBuffer clientBuffer);
+EGLClientBuffer AHardwareBufferToClientBuffer(const AHardwareBuffer *hardwareBuffer);
+AHardwareBuffer *ClientBufferToAHardwareBuffer(EGLClientBuffer clientBuffer);
+
+EGLClientBuffer CreateEGLClientBufferFromAHardwareBuffer(int width,
+ int height,
+ int depth,
+ int androidFormat,
+ int usage);
+
+void GetANativeWindowBufferProperties(const ANativeWindowBuffer *buffer,
+ int *width,
+ int *height,
+ int *depth,
+ int *pixelFormat,
+ uint64_t *usage);
+GLenum NativePixelFormatToGLInternalFormat(int pixelFormat);
+int GLInternalFormatToNativePixelFormat(GLenum internalFormat);
+
+bool NativePixelFormatIsYUV(int pixelFormat);
+
+AHardwareBuffer *ANativeWindowBufferToAHardwareBuffer(ANativeWindowBuffer *windowBuffer);
+
+uint64_t GetAHBUsage(int eglNativeBufferUsage);
+
+} // namespace android
+} // namespace angle
+
+#endif // COMMON_ANDROIDUTIL_H_
diff --git a/gfx/angle/checkout/src/common/angle_version.h b/gfx/angle/checkout/src/common/angle_version.h
new file mode 100644
index 0000000000..d9d7e8929d
--- /dev/null
+++ b/gfx/angle/checkout/src/common/angle_version.h
@@ -0,0 +1,28 @@
+//
+// Copyright 2014 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// angle_version.h: ANGLE version constants. Generated from git commands.
+
+#ifndef COMMON_ANGLE_VERSION_H_
+#define COMMON_ANGLE_VERSION_H_
+
+#include "angle_commit.h"
+
+#define ANGLE_MAJOR_VERSION 2
+#define ANGLE_MINOR_VERSION 1
+
+#ifndef ANGLE_REVISION
+# define ANGLE_REVISION ANGLE_COMMIT_POSITION
+#endif
+
+#define ANGLE_STRINGIFY(x) #x
+#define ANGLE_MACRO_STRINGIFY(x) ANGLE_STRINGIFY(x)
+
+#define ANGLE_VERSION_STRING \
+ ANGLE_MACRO_STRINGIFY(ANGLE_MAJOR_VERSION) \
+ "." ANGLE_MACRO_STRINGIFY(ANGLE_MINOR_VERSION) "." ANGLE_MACRO_STRINGIFY( \
+ ANGLE_REVISION) " git hash: " ANGLE_COMMIT_HASH
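For example, with the constants above and hypothetical commit data (ANGLE_REVISION expanding to 4242 and ANGLE_COMMIT_HASH to "abcdef0123456789"), ANGLE_VERSION_STRING expands to the single string literal:

    "2.1.4242 git hash: abcdef0123456789"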
+
+#endif // COMMON_ANGLE_VERSION_H_
diff --git a/gfx/angle/checkout/src/common/angle_version_info.cpp b/gfx/angle/checkout/src/common/angle_version_info.cpp
new file mode 100644
index 0000000000..963741a456
--- /dev/null
+++ b/gfx/angle/checkout/src/common/angle_version_info.cpp
@@ -0,0 +1,40 @@
+//
+// Copyright 2021 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// angle_version_info.cpp: ANGLE version queries.
+
+#include "common/angle_version.h"
+
+namespace angle
+{
+int GetANGLERevision()
+{
+ return ANGLE_REVISION;
+}
+
+const char *GetANGLEVersionString()
+{
+ return ANGLE_VERSION_STRING;
+}
+
+const char *GetANGLECommitHash()
+{
+ return ANGLE_COMMIT_HASH;
+}
+
+int GetANGLECommitHashSize()
+{
+ return ANGLE_COMMIT_HASH_SIZE;
+}
+
+bool GetANGLEHasBinaryLoading()
+{
+#ifdef ANGLE_HAS_BINARY_LOADING
+ return true;
+#else
+ return false;
+#endif  // ANGLE_HAS_BINARY_LOADING
+}
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/angle_version_info.h b/gfx/angle/checkout/src/common/angle_version_info.h
new file mode 100644
index 0000000000..1d5392068c
--- /dev/null
+++ b/gfx/angle/checkout/src/common/angle_version_info.h
@@ -0,0 +1,20 @@
+//
+// Copyright 2021 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// angle_version_info.h: ANGLE version queries.
+
+#ifndef COMMON_VERSION_INFO_H_
+#define COMMON_VERSION_INFO_H_
+
+namespace angle
+{
+int GetANGLERevision();
+const char *GetANGLEVersionString();
+const char *GetANGLECommitHash();
+int GetANGLECommitHashSize();
+bool GetANGLEHasBinaryLoading();
+} // namespace angle
+
+#endif // COMMON_VERSION_INFO_H_
diff --git a/gfx/angle/checkout/src/common/angleutils.cpp b/gfx/angle/checkout/src/common/angleutils.cpp
new file mode 100644
index 0000000000..2b69f66d74
--- /dev/null
+++ b/gfx/angle/checkout/src/common/angleutils.cpp
@@ -0,0 +1,156 @@
+//
+// Copyright 2014 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+#include "common/angleutils.h"
+#include "common/debug.h"
+
+#include <stdio.h>
+
+#include <limits>
+#include <vector>
+
+namespace angle
+{
+// dirtyPointer is a special value that will make the comparison with any valid pointer fail and
+// force the renderer to re-apply the state.
+const uintptr_t DirtyPointer = std::numeric_limits<uintptr_t>::max();
+
+SaveFileHelper::SaveFileHelper(const std::string &filePathIn)
+ : mOfs(filePathIn, std::ios::binary | std::ios::out), mFilePath(filePathIn)
+{
+ if (!mOfs.is_open())
+ {
+ FATAL() << "Could not open " << filePathIn;
+ }
+}
+
+SaveFileHelper::~SaveFileHelper()
+{
+ printf("Saved '%s'.\n", mFilePath.c_str());
+}
+
+void SaveFileHelper::checkError()
+{
+ if (mOfs.bad())
+ {
+ FATAL() << "Error writing to " << mFilePath;
+ }
+}
+
+void SaveFileHelper::write(const uint8_t *data, size_t size)
+{
+ mOfs.write(reinterpret_cast<const char *>(data), size);
+}
+
+// AMD_performance_monitor helpers.
+
+PerfMonitorCounter::PerfMonitorCounter() = default;
+
+PerfMonitorCounter::~PerfMonitorCounter() = default;
+
+PerfMonitorCounterGroup::PerfMonitorCounterGroup() = default;
+
+PerfMonitorCounterGroup::~PerfMonitorCounterGroup() = default;
+
+uint32_t GetPerfMonitorCounterIndex(const PerfMonitorCounters &counters, const std::string &name)
+{
+ for (uint32_t counterIndex = 0; counterIndex < static_cast<uint32_t>(counters.size());
+ ++counterIndex)
+ {
+ if (counters[counterIndex].name == name)
+ {
+ return counterIndex;
+ }
+ }
+
+ return std::numeric_limits<uint32_t>::max();
+}
+
+uint32_t GetPerfMonitorCounterGroupIndex(const PerfMonitorCounterGroups &groups,
+ const std::string &name)
+{
+ for (uint32_t groupIndex = 0; groupIndex < static_cast<uint32_t>(groups.size()); ++groupIndex)
+ {
+ if (groups[groupIndex].name == name)
+ {
+ return groupIndex;
+ }
+ }
+
+ return std::numeric_limits<uint32_t>::max();
+}
+
+const PerfMonitorCounter &GetPerfMonitorCounter(const PerfMonitorCounters &counters,
+ const std::string &name)
+{
+ return GetPerfMonitorCounter(const_cast<PerfMonitorCounters &>(counters), name);
+}
+
+PerfMonitorCounter &GetPerfMonitorCounter(PerfMonitorCounters &counters, const std::string &name)
+{
+ uint32_t counterIndex = GetPerfMonitorCounterIndex(counters, name);
+ ASSERT(counterIndex < static_cast<uint32_t>(counters.size()));
+ return counters[counterIndex];
+}
+
+const PerfMonitorCounterGroup &GetPerfMonitorCounterGroup(const PerfMonitorCounterGroups &groups,
+ const std::string &name)
+{
+ return GetPerfMonitorCounterGroup(const_cast<PerfMonitorCounterGroups &>(groups), name);
+}
+
+PerfMonitorCounterGroup &GetPerfMonitorCounterGroup(PerfMonitorCounterGroups &groups,
+ const std::string &name)
+{
+ uint32_t groupIndex = GetPerfMonitorCounterGroupIndex(groups, name);
+ ASSERT(groupIndex < static_cast<uint32_t>(groups.size()));
+ return groups[groupIndex];
+}
+} // namespace angle
+
+std::string ArrayString(unsigned int i)
+{
+ // We assume that UINT_MAX and GL_INVALID_INDEX are equal.
+ ASSERT(i != UINT_MAX);
+
+ std::stringstream strstr;
+ strstr << "[";
+ strstr << i;
+ strstr << "]";
+ return strstr.str();
+}
+
+std::string ArrayIndexString(const std::vector<unsigned int> &indices)
+{
+ std::stringstream strstr;
+
+ for (auto indicesIt = indices.rbegin(); indicesIt != indices.rend(); ++indicesIt)
+ {
+ // We assume that UINT_MAX and GL_INVALID_INDEX are equal.
+ ASSERT(*indicesIt != UINT_MAX);
+ strstr << "[";
+ strstr << (*indicesIt);
+ strstr << "]";
+ }
+
+ return strstr.str();
+}
+
+size_t FormatStringIntoVector(const char *fmt, va_list vararg, std::vector<char> &outBuffer)
+{
+ va_list varargCopy;
+ va_copy(varargCopy, vararg);
+
+ int len = vsnprintf(nullptr, 0, fmt, vararg);
+ ASSERT(len >= 0);
+
+ outBuffer.resize(len + 1, 0);
+
+ len = vsnprintf(outBuffer.data(), outBuffer.size(), fmt, varargCopy);
+ va_end(varargCopy);
+ ASSERT(len >= 0);
+ return static_cast<size_t>(len);
+}
diff --git a/gfx/angle/checkout/src/common/angleutils.h b/gfx/angle/checkout/src/common/angleutils.h
new file mode 100644
index 0000000000..bcbcabc782
--- /dev/null
+++ b/gfx/angle/checkout/src/common/angleutils.h
@@ -0,0 +1,601 @@
+//
+// Copyright 2002 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// angleutils.h: Common ANGLE utilities.
+
+#ifndef COMMON_ANGLEUTILS_H_
+#define COMMON_ANGLEUTILS_H_
+
+#include "common/platform.h"
+
+#if defined(ANGLE_USE_ABSEIL)
+# include "absl/container/flat_hash_map.h"
+# include "absl/container/flat_hash_set.h"
+#endif // defined(ANGLE_USE_ABSEIL)
+
+#if defined(ANGLE_WITH_LSAN)
+# include <sanitizer/lsan_interface.h>
+#endif // defined(ANGLE_WITH_LSAN)
+
+#include <climits>
+#include <cstdarg>
+#include <cstddef>
+#include <fstream>
+#include <mutex>
+#include <set>
+#include <sstream>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+// A helper class to disallow copy and assignment operators
+namespace angle
+{
+
+#if defined(ANGLE_ENABLE_D3D9) || defined(ANGLE_ENABLE_D3D11)
+using Microsoft::WRL::ComPtr;
+#endif // defined(ANGLE_ENABLE_D3D9) || defined(ANGLE_ENABLE_D3D11)
+
+#if defined(ANGLE_USE_ABSEIL)
+template <typename Key, typename T, class Hash = absl::container_internal::hash_default_hash<Key>>
+using HashMap = absl::flat_hash_map<Key, T, Hash>;
+template <typename Key, class Hash = absl::container_internal::hash_default_hash<Key>>
+using HashSet = absl::flat_hash_set<Key, Hash>;
+#else
+template <typename Key, typename T, class Hash = std::hash<Key>>
+using HashMap = std::unordered_map<Key, T, Hash>;
+template <typename Key, class Hash = std::hash<Key>>
+using HashSet = std::unordered_set<Key, Hash>;
+#endif // defined(ANGLE_USE_ABSEIL)
+
+class NonCopyable
+{
+ protected:
+ constexpr NonCopyable() = default;
+ ~NonCopyable() = default;
+
+ private:
+ NonCopyable(const NonCopyable &) = delete;
+ void operator=(const NonCopyable &) = delete;
+};
+
+extern const uintptr_t DirtyPointer;
+
+struct SaveFileHelper
+{
+ public:
+ // We always use ios::binary to avoid inconsistent line endings when captured on Linux vs Win.
+ SaveFileHelper(const std::string &filePathIn);
+ ~SaveFileHelper();
+
+ template <typename T>
+ SaveFileHelper &operator<<(const T &value)
+ {
+ mOfs << value;
+ checkError();
+ return *this;
+ }
+
+ void write(const uint8_t *data, size_t size);
+
+ private:
+ void checkError();
+
+ std::ofstream mOfs;
+ std::string mFilePath;
+};
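Usage sketch (illustrative only; the output path is hypothetical):

    angle::SaveFileHelper saveFile("/tmp/capture.bin");
    saveFile << "frame " << 42;            // streamed like a std::ofstream, with error checking
    const uint8_t bytes[4] = {1, 2, 3, 4};
    saveFile.write(bytes, sizeof(bytes));  // raw bytes; the destructor prints the saved path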
+
+// AMD_performance_monitor helpers.
+constexpr char kPerfMonitorExtensionName[] = "GL_AMD_performance_monitor";
+
+struct PerfMonitorCounter
+{
+ PerfMonitorCounter();
+ ~PerfMonitorCounter();
+
+ std::string name;
+ uint64_t value;
+};
+using PerfMonitorCounters = std::vector<PerfMonitorCounter>;
+
+struct PerfMonitorCounterGroup
+{
+ PerfMonitorCounterGroup();
+ ~PerfMonitorCounterGroup();
+
+ std::string name;
+ PerfMonitorCounters counters;
+};
+using PerfMonitorCounterGroups = std::vector<PerfMonitorCounterGroup>;
+
+uint32_t GetPerfMonitorCounterIndex(const PerfMonitorCounters &counters, const std::string &name);
+const PerfMonitorCounter &GetPerfMonitorCounter(const PerfMonitorCounters &counters,
+ const std::string &name);
+PerfMonitorCounter &GetPerfMonitorCounter(PerfMonitorCounters &counters, const std::string &name);
+uint32_t GetPerfMonitorCounterGroupIndex(const PerfMonitorCounterGroups &groups,
+ const std::string &name);
+const PerfMonitorCounterGroup &GetPerfMonitorCounterGroup(const PerfMonitorCounterGroups &groups,
+ const std::string &name);
+PerfMonitorCounterGroup &GetPerfMonitorCounterGroup(PerfMonitorCounterGroups &groups,
+ const std::string &name);
+
+struct PerfMonitorTriplet
+{
+ uint32_t group;
+ uint32_t counter;
+ uint64_t value;
+};
+
+#define ANGLE_VK_PERF_COUNTERS_X(FN) \
+ FN(commandQueueSubmitCallsTotal) \
+ FN(commandQueueSubmitCallsPerFrame) \
+ FN(vkQueueSubmitCallsTotal) \
+ FN(vkQueueSubmitCallsPerFrame) \
+ FN(renderPasses) \
+ FN(writeDescriptorSets) \
+ FN(flushedOutsideRenderPassCommandBuffers) \
+ FN(swapchainResolveInSubpass) \
+ FN(swapchainResolveOutsideSubpass) \
+ FN(resolveImageCommands) \
+ FN(colorLoadOpClears) \
+ FN(colorLoadOpLoads) \
+ FN(colorLoadOpNones) \
+ FN(colorStoreOpStores) \
+ FN(colorStoreOpNones) \
+ FN(colorClearAttachments) \
+ FN(depthLoadOpClears) \
+ FN(depthLoadOpLoads) \
+ FN(depthLoadOpNones) \
+ FN(depthStoreOpStores) \
+ FN(depthStoreOpNones) \
+ FN(depthClearAttachments) \
+ FN(stencilLoadOpClears) \
+ FN(stencilLoadOpLoads) \
+ FN(stencilLoadOpNones) \
+ FN(stencilStoreOpStores) \
+ FN(stencilStoreOpNones) \
+ FN(stencilClearAttachments) \
+ FN(colorAttachmentUnresolves) \
+ FN(depthAttachmentUnresolves) \
+ FN(stencilAttachmentUnresolves) \
+ FN(colorAttachmentResolves) \
+ FN(depthAttachmentResolves) \
+ FN(stencilAttachmentResolves) \
+ FN(readOnlyDepthStencilRenderPasses) \
+ FN(pipelineCreationCacheHits) \
+ FN(pipelineCreationCacheMisses) \
+ FN(pipelineCreationTotalCacheHitsDurationNs) \
+ FN(pipelineCreationTotalCacheMissesDurationNs) \
+ FN(descriptorSetAllocations) \
+ FN(descriptorSetCacheTotalSize) \
+ FN(descriptorSetCacheKeySizeBytes) \
+ FN(uniformsAndXfbDescriptorSetCacheHits) \
+ FN(uniformsAndXfbDescriptorSetCacheMisses) \
+ FN(uniformsAndXfbDescriptorSetCacheTotalSize) \
+ FN(textureDescriptorSetCacheHits) \
+ FN(textureDescriptorSetCacheMisses) \
+ FN(textureDescriptorSetCacheTotalSize) \
+ FN(shaderResourcesDescriptorSetCacheHits) \
+ FN(mutableTexturesUploaded) \
+ FN(shaderResourcesDescriptorSetCacheMisses) \
+ FN(shaderResourcesDescriptorSetCacheTotalSize) \
+ FN(buffersGhosted) \
+ FN(vertexArraySyncStateCalls) \
+ FN(allocateNewBufferBlockCalls) \
+ FN(dynamicBufferAllocations) \
+ FN(framebufferCacheSize)
+
+#define ANGLE_DECLARE_PERF_COUNTER(COUNTER) uint64_t COUNTER;
+
+struct VulkanPerfCounters
+{
+ ANGLE_VK_PERF_COUNTERS_X(ANGLE_DECLARE_PERF_COUNTER)
+};
+
+#undef ANGLE_DECLARE_PERF_COUNTER
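To illustrate the X-macro above (sketch of the generated code, not a literal copy), the first few FN(...) entries expand inside VulkanPerfCounters to one uint64_t field each:

    struct VulkanPerfCounters
    {
        uint64_t commandQueueSubmitCallsTotal;
        uint64_t commandQueueSubmitCallsPerFrame;
        uint64_t vkQueueSubmitCallsTotal;
        // ... one uint64_t per entry in ANGLE_VK_PERF_COUNTERS_X ...
    };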
+
+} // namespace angle
+
+template <typename T, size_t N>
+constexpr inline size_t ArraySize(T (&)[N])
+{
+ return N;
+}
+
+template <typename T>
+class WrappedArray final : angle::NonCopyable
+{
+ public:
+ template <size_t N>
+ constexpr WrappedArray(const T (&data)[N]) : mArray(&data[0]), mSize(N)
+ {}
+
+ constexpr WrappedArray() : mArray(nullptr), mSize(0) {}
+ constexpr WrappedArray(const T *data, size_t size) : mArray(data), mSize(size) {}
+
+ WrappedArray(WrappedArray &&other) : WrappedArray()
+ {
+ std::swap(mArray, other.mArray);
+ std::swap(mSize, other.mSize);
+ }
+
+ ~WrappedArray() {}
+
+ constexpr const T *get() const { return mArray; }
+ constexpr size_t size() const { return mSize; }
+
+ private:
+ const T *mArray;
+ size_t mSize;
+};
+
+template <typename T, unsigned int N>
+void SafeRelease(T (&resourceBlock)[N])
+{
+ for (unsigned int i = 0; i < N; i++)
+ {
+ SafeRelease(resourceBlock[i]);
+ }
+}
+
+template <typename T>
+void SafeRelease(T &resource)
+{
+ if (resource)
+ {
+ resource->Release();
+ resource = nullptr;
+ }
+}
+
+template <typename T>
+void SafeDelete(T *&resource)
+{
+ delete resource;
+ resource = nullptr;
+}
+
+template <typename T>
+void SafeDeleteContainer(T &resource)
+{
+ for (auto &element : resource)
+ {
+ SafeDelete(element);
+ }
+ resource.clear();
+}
+
+template <typename T>
+void SafeDeleteArray(T *&resource)
+{
+ delete[] resource;
+ resource = nullptr;
+}
+
+// Provide a less-than function for comparing structs
+// Note: struct memory must be initialized to zero, because of packing gaps
+template <typename T>
+inline bool StructLessThan(const T &a, const T &b)
+{
+ return (memcmp(&a, &b, sizeof(T)) < 0);
+}
+
+// Provide an equality function for comparing structs
+// Note: struct memory must be initialized to zero, because of packing gaps
+template <typename T>
+inline bool StructEquals(const T &a, const T &b)
+{
+ return (memcmp(&a, &b, sizeof(T)) == 0);
+}
+
+template <typename T>
+inline void StructZero(T *obj)
+{
+ memset(obj, 0, sizeof(T));
+}
+
+template <typename T>
+inline bool IsMaskFlagSet(T mask, T flag)
+{
+ // Handles multibit flags as well
+ return (mask & flag) == flag;
+}
+
+inline const char *MakeStaticString(const std::string &str)
+{
+ // On the heap so that no destructor runs on application exit.
+ static std::set<std::string> *strings = new std::set<std::string>;
+ std::set<std::string>::iterator it = strings->find(str);
+ if (it != strings->end())
+ {
+ return it->c_str();
+ }
+
+ return strings->insert(str).first->c_str();
+}
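An illustration of the interning behavior (the extension string is hypothetical):

    const char *a = MakeStaticString("GL_ANGLE_example_extension");
    const char *b = MakeStaticString("GL_ANGLE_example_extension");
    // a == b: equal strings return the same pointer, and it stays valid for the life of
    // the process because the backing set is intentionally never destroyed.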
+
+std::string ArrayString(unsigned int i);
+
+// Indices are stored in vectors with the outermost index at the back. The output of the
+// function prints the indices in reverse order, outermost first.
+std::string ArrayIndexString(const std::vector<unsigned int> &indices);
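For example (illustrative): for the vector {2, 1, 0}, where 0 is the outermost index, the result is "[0][1][2]".

    std::string str = ArrayIndexString({2, 1, 0});  // yields "[0][1][2]"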
+
+inline std::string Str(int i)
+{
+ std::stringstream strstr;
+ strstr << i;
+ return strstr.str();
+}
+
+template <typename T>
+std::string ToString(const T &value)
+{
+ std::ostringstream o;
+ o << value;
+ return o.str();
+}
+
+inline bool IsLittleEndian()
+{
+ constexpr uint32_t kEndiannessTest = 1;
+ const bool isLittleEndian = *reinterpret_cast<const uint8_t *>(&kEndiannessTest) == 1;
+ return isLittleEndian;
+}
+
+// Helper class that wraps a mutex whose use can be enabled or disabled with a boolean.
+class ConditionalMutex final : angle::NonCopyable
+{
+ public:
+ ConditionalMutex() : mUseMutex(true) {}
+ void init(bool useMutex) { mUseMutex = useMutex; }
+ void lock()
+ {
+ if (mUseMutex)
+ {
+ mMutex.lock();
+ }
+ }
+ void unlock()
+ {
+ if (mUseMutex)
+ {
+ mMutex.unlock();
+ }
+ }
+
+ private:
+ std::mutex mMutex;
+ bool mUseMutex;
+};
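Since ConditionalMutex exposes lock() and unlock(), it works with the standard guards; a sketch (illustrative only):

    ConditionalMutex mutex;
    mutex.init(false);  // e.g. known single-threaded use: locking becomes a no-op
    {
        std::lock_guard<ConditionalMutex> lock(mutex);
        // ... critical section, actually guarded only when init(true) was used ...
    }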
+
+// snprintf is not defined with MSVC prior to msvc14
+#if defined(_MSC_VER) && _MSC_VER < 1900
+# define snprintf _snprintf
+#endif
+
+#define GL_A1RGB5_ANGLEX 0x6AC5
+#define GL_BGRX8_ANGLEX 0x6ABA
+#define GL_BGR565_ANGLEX 0x6ABB
+#define GL_BGRA4_ANGLEX 0x6ABC
+#define GL_BGR5_A1_ANGLEX 0x6ABD
+#define GL_INT_64_ANGLEX 0x6ABE
+#define GL_UINT_64_ANGLEX 0x6ABF
+#define GL_BGRA8_SRGB_ANGLEX 0x6AC0
+#define GL_BGR10_A2_ANGLEX 0x6AF9
+
+// These are fake formats used to fit typeless D3D textures that can be bound to EGL pbuffers into
+// the format system (for extension EGL_ANGLE_d3d_texture_client_buffer):
+#define GL_RGBA8_TYPELESS_ANGLEX 0x6AC1
+#define GL_RGBA8_TYPELESS_SRGB_ANGLEX 0x6AC2
+#define GL_BGRA8_TYPELESS_ANGLEX 0x6AC3
+#define GL_BGRA8_TYPELESS_SRGB_ANGLEX 0x6AC4
+
+#define GL_R8_SSCALED_ANGLEX 0x6AC6
+#define GL_RG8_SSCALED_ANGLEX 0x6AC7
+#define GL_RGB8_SSCALED_ANGLEX 0x6AC8
+#define GL_RGBA8_SSCALED_ANGLEX 0x6AC9
+#define GL_R8_USCALED_ANGLEX 0x6ACA
+#define GL_RG8_USCALED_ANGLEX 0x6ACB
+#define GL_RGB8_USCALED_ANGLEX 0x6ACC
+#define GL_RGBA8_USCALED_ANGLEX 0x6ACD
+
+#define GL_R16_SSCALED_ANGLEX 0x6ACE
+#define GL_RG16_SSCALED_ANGLEX 0x6ACF
+#define GL_RGB16_SSCALED_ANGLEX 0x6AD0
+#define GL_RGBA16_SSCALED_ANGLEX 0x6AD1
+#define GL_R16_USCALED_ANGLEX 0x6AD2
+#define GL_RG16_USCALED_ANGLEX 0x6AD3
+#define GL_RGB16_USCALED_ANGLEX 0x6AD4
+#define GL_RGBA16_USCALED_ANGLEX 0x6AD5
+
+#define GL_R32_SSCALED_ANGLEX 0x6AD6
+#define GL_RG32_SSCALED_ANGLEX 0x6AD7
+#define GL_RGB32_SSCALED_ANGLEX 0x6AD8
+#define GL_RGBA32_SSCALED_ANGLEX 0x6AD9
+#define GL_R32_USCALED_ANGLEX 0x6ADA
+#define GL_RG32_USCALED_ANGLEX 0x6ADB
+#define GL_RGB32_USCALED_ANGLEX 0x6ADC
+#define GL_RGBA32_USCALED_ANGLEX 0x6ADD
+
+#define GL_R32_SNORM_ANGLEX 0x6ADE
+#define GL_RG32_SNORM_ANGLEX 0x6ADF
+#define GL_RGB32_SNORM_ANGLEX 0x6AE0
+#define GL_RGBA32_SNORM_ANGLEX 0x6AE1
+#define GL_R32_UNORM_ANGLEX 0x6AE2
+#define GL_RG32_UNORM_ANGLEX 0x6AE3
+#define GL_RGB32_UNORM_ANGLEX 0x6AE4
+#define GL_RGBA32_UNORM_ANGLEX 0x6AE5
+
+#define GL_R32_FIXED_ANGLEX 0x6AE6
+#define GL_RG32_FIXED_ANGLEX 0x6AE7
+#define GL_RGB32_FIXED_ANGLEX 0x6AE8
+#define GL_RGBA32_FIXED_ANGLEX 0x6AE9
+
+#define GL_RGB10_A2_SINT_ANGLEX 0x6AEA
+#define GL_RGB10_A2_SNORM_ANGLEX 0x6AEB
+#define GL_RGB10_A2_SSCALED_ANGLEX 0x6AEC
+#define GL_RGB10_A2_USCALED_ANGLEX 0x6AED
+
+// EXT_texture_type_2_10_10_10_REV
+#define GL_RGB10_UNORM_ANGLEX 0x6AEE
+
+// These are fake formats for OES_vertex_type_10_10_10_2
+#define GL_A2_RGB10_UNORM_ANGLEX 0x6AEF
+#define GL_A2_RGB10_SNORM_ANGLEX 0x6AF0
+#define GL_A2_RGB10_USCALED_ANGLEX 0x6AF1
+#define GL_A2_RGB10_SSCALED_ANGLEX 0x6AF2
+#define GL_X2_RGB10_UINT_ANGLEX 0x6AF3
+#define GL_X2_RGB10_SINT_ANGLEX 0x6AF4
+#define GL_X2_RGB10_USCALED_ANGLEX 0x6AF5
+#define GL_X2_RGB10_SSCALED_ANGLEX 0x6AF6
+#define GL_X2_RGB10_UNORM_ANGLEX 0x6AF7
+#define GL_X2_RGB10_SNORM_ANGLEX 0x6AF8
+
+#define ANGLE_CHECK_GL_ALLOC(context, result) \
+ ANGLE_CHECK(context, result, "Failed to allocate host memory", GL_OUT_OF_MEMORY)
+
+#define ANGLE_CHECK_GL_MATH(context, result) \
+ ANGLE_CHECK(context, result, "Integer overflow.", GL_INVALID_OPERATION)
+
+#define ANGLE_GL_UNREACHABLE(context) \
+ UNREACHABLE(); \
+ ANGLE_CHECK(context, false, "Unreachable Code.", GL_INVALID_OPERATION)
+
+#if defined(ANGLE_WITH_LSAN)
+# define ANGLE_SCOPED_DISABLE_LSAN() __lsan::ScopedDisabler lsanDisabler
+#else
+# define ANGLE_SCOPED_DISABLE_LSAN()
+#endif
+
+#if defined(ANGLE_WITH_MSAN)
+class MsanScopedDisableInterceptorChecks final : angle::NonCopyable
+{
+ public:
+ MsanScopedDisableInterceptorChecks() { __msan_scoped_disable_interceptor_checks(); }
+ ~MsanScopedDisableInterceptorChecks() { __msan_scoped_enable_interceptor_checks(); }
+};
+# define ANGLE_SCOPED_DISABLE_MSAN() \
+ MsanScopedDisableInterceptorChecks msanScopedDisableInterceptorChecks
+#else
+# define ANGLE_SCOPED_DISABLE_MSAN()
+#endif
+
+// The ANGLE_NO_SANITIZE_MEMORY macro suppresses MemorySanitizer checks for
+// use-of-uninitialized-data. It can be used to decorate functions with known
+// false positives.
+#ifdef __clang__
+# define ANGLE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
+#else
+# define ANGLE_NO_SANITIZE_MEMORY
+#endif
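+
+// Illustrative usage (not part of the original source, hypothetical function name):
+//   ANGLE_NO_SANITIZE_MEMORY void ReadFromMappedMemory(const void *ptr, size_t size);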
+
+// Similar to the above, but for thread sanitization.
+#ifdef __clang__
+# define ANGLE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
+#else
+# define ANGLE_NO_SANITIZE_THREAD
+#endif
+
+// The inlining code below is lifted from V8.
+#if defined(__clang__) || (defined(__GNUC__) && defined(__has_attribute))
+# define ANGLE_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
+# define ANGLE_HAS___FORCEINLINE 0
+#elif defined(_MSC_VER)
+# define ANGLE_HAS_ATTRIBUTE_ALWAYS_INLINE 0
+# define ANGLE_HAS___FORCEINLINE 1
+#else
+# define ANGLE_HAS_ATTRIBUTE_ALWAYS_INLINE 0
+# define ANGLE_HAS___FORCEINLINE 0
+#endif
+
+#if defined(NDEBUG) && ANGLE_HAS_ATTRIBUTE_ALWAYS_INLINE
+# define ANGLE_INLINE inline __attribute__((always_inline))
+#elif defined(NDEBUG) && ANGLE_HAS___FORCEINLINE
+# define ANGLE_INLINE __forceinline
+#else
+# define ANGLE_INLINE inline
+#endif
+
+#if defined(__clang__) || (defined(__GNUC__) && defined(__has_attribute))
+# if __has_attribute(noinline)
+# define ANGLE_NOINLINE __attribute__((noinline))
+# else
+# define ANGLE_NOINLINE
+# endif
+#elif defined(_MSC_VER)
+# define ANGLE_NOINLINE __declspec(noinline)
+#else
+# define ANGLE_NOINLINE
+#endif
+
+#if defined(__clang__) || (defined(__GNUC__) && defined(__has_attribute))
+# if __has_attribute(format)
+# define ANGLE_FORMAT_PRINTF(fmt, args) __attribute__((format(__printf__, fmt, args)))
+# else
+# define ANGLE_FORMAT_PRINTF(fmt, args)
+# endif
+#else
+# define ANGLE_FORMAT_PRINTF(fmt, args)
+#endif
+
+ANGLE_FORMAT_PRINTF(1, 0)
+size_t FormatStringIntoVector(const char *fmt, va_list vararg, std::vector<char> &buffer);
+
+// Format messes up the # inside the macro.
+// clang-format off
+#ifndef ANGLE_STRINGIFY
+# define ANGLE_STRINGIFY(x) #x
+#endif
+// clang-format on
+
+#ifndef ANGLE_MACRO_STRINGIFY
+# define ANGLE_MACRO_STRINGIFY(x) ANGLE_STRINGIFY(x)
+#endif
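+
+// Illustrative example (not part of the original source): given a hypothetical
+// "#define ANGLE_SOME_VERSION 42", ANGLE_STRINGIFY(ANGLE_SOME_VERSION) yields
+// "ANGLE_SOME_VERSION", while ANGLE_MACRO_STRINGIFY(ANGLE_SOME_VERSION) expands the macro first
+// and yields "42".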
+
+#if __has_cpp_attribute(clang::require_constant_initialization)
+# define ANGLE_REQUIRE_CONSTANT_INIT [[clang::require_constant_initialization]]
+#else
+# define ANGLE_REQUIRE_CONSTANT_INIT
+#endif // __has_cpp_attribute(require_constant_initialization)
+
+// Compiler configs.
+inline bool IsASan()
+{
+#if defined(ANGLE_WITH_ASAN)
+ return true;
+#else
+ return false;
+#endif // defined(ANGLE_WITH_ASAN)
+}
+
+inline bool IsMSan()
+{
+#if defined(ANGLE_WITH_MSAN)
+ return true;
+#else
+ return false;
+#endif // defined(ANGLE_WITH_MSAN)
+}
+
+inline bool IsTSan()
+{
+#if defined(ANGLE_WITH_TSAN)
+ return true;
+#else
+ return false;
+#endif // defined(ANGLE_WITH_TSAN)
+}
+
+inline bool IsUBSan()
+{
+#if defined(ANGLE_WITH_UBSAN)
+ return true;
+#else
+ return false;
+#endif // defined(ANGLE_WITH_UBSAN)
+}
+#endif // COMMON_ANGLEUTILS_H_
diff --git a/gfx/angle/checkout/src/common/apple_platform_utils.h b/gfx/angle/checkout/src/common/apple_platform_utils.h
new file mode 100644
index 0000000000..da932a9207
--- /dev/null
+++ b/gfx/angle/checkout/src/common/apple_platform_utils.h
@@ -0,0 +1,90 @@
+//
+// Copyright 2019 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// apple_platform_utils.h: Common utilities for Apple platforms.
+
+#ifndef COMMON_APPLE_PLATFORM_UTILS_H_
+#define COMMON_APPLE_PLATFORM_UTILS_H_
+
+#include "common/platform.h"
+
+// These macros substitute for the Apple-specific @available directive:
+
+// TARGET_OS_MACCATALYST is only available in the macOS 10.15 SDK and newer
+
+#if TARGET_OS_MACCATALYST
+// ANGLE_APPLE_AVAILABLE_XCI: check if the minimum version of any of the 3 platforms
+// (OSX/Catalyst/iOS) is available:
+# define ANGLE_APPLE_AVAILABLE_XCI(macVer, macCatalystVer, iOSVer) \
+ @available(macOS macVer, macCatalyst macCatalystVer, iOS iOSVer, *)
+// ANGLE_APPLE_AVAILABLE_XC: check if the minimum version of either of the 2 platforms
+// (OSX/Catalyst) is available:
+# define ANGLE_APPLE_AVAILABLE_XC(macVer, macCatalystVer) \
+ @available(macOS macVer, macCatalyst macCatalystVer, *)
+// ANGLE_APPLE_AVAILABLE_CI: check if the minimum version of either of the 2 platforms
+// (Catalyst/iOS) is available:
+# define ANGLE_APPLE_AVAILABLE_CI(macCatalystVer, iOSVer) \
+ @available(macCatalyst macCatalystVer, iOS iOSVer, *)
+#else
+# define ANGLE_APPLE_AVAILABLE_XCI(macVer, macCatalystVer, iOSVer) \
+ ANGLE_APPLE_AVAILABLE_XI(macVer, iOSVer)
+
+# define ANGLE_APPLE_AVAILABLE_XC(macVer, macCatalystVer) @available(macOS macVer, *)
+# define ANGLE_APPLE_AVAILABLE_CI(macCatalystVer, iOSVer) @available(iOS iOSVer, tvOS iOSVer, *)
+#endif
+
+// ANGLE_APPLE_AVAILABLE_XI: check if the minimum version of either of the 2 platforms (OSX/iOS)
+// is available:
+#define ANGLE_APPLE_AVAILABLE_XI(macVer, iOSVer) \
+ @available(macOS macVer, iOS iOSVer, tvOS iOSVer, *)
+
+// ANGLE_APPLE_AVAILABLE_I: check if a particular iOS version is available
+#define ANGLE_APPLE_AVAILABLE_I(iOSVer) @available(iOS iOSVer, tvOS iOSVer, *)
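+
+// Usage sketch (illustrative, not part of the original source): the macros stand in for an
+// @available check, e.g.
+//   if (ANGLE_APPLE_AVAILABLE_XI(10.13, 11.0))
+//   {
+//       // use an API introduced in macOS 10.13 / iOS 11.0
+//   }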
+
+#if TARGET_OS_IPHONE
+# if !defined(__IPHONE_11_0)
+# define __IPHONE_11_0 110000
+# endif
+# if !defined(ANGLE_IOS_DEPLOY_TARGET)
+# define ANGLE_IOS_DEPLOY_TARGET __IPHONE_11_0
+# endif
+# if !defined(__IPHONE_OS_VERSION_MAX_ALLOWED)
+# define __IPHONE_OS_VERSION_MAX_ALLOWED __IPHONE_11_0
+# endif
+# if !defined(__TV_OS_VERSION_MAX_ALLOWED)
+# define __TV_OS_VERSION_MAX_ALLOWED __IPHONE_11_0
+# endif
+#endif
+
+#if !defined(TARGET_OS_MACCATALYST)
+# define TARGET_OS_MACCATALYST 0
+#endif
+
+#if defined(__ARM_ARCH)
+# define ANGLE_APPLE_IS_ARM (__ARM_ARCH != 0)
+#else
+# define ANGLE_APPLE_IS_ARM 0
+#endif
+
+#define ANGLE_APPLE_OBJC_SCOPE @autoreleasepool
+
+#if !__has_feature(objc_arc)
+# define ANGLE_APPLE_AUTORELEASE autorelease
+# define ANGLE_APPLE_RETAIN retain
+# define ANGLE_APPLE_RELEASE release
+#else
+# define ANGLE_APPLE_AUTORELEASE self
+# define ANGLE_APPLE_RETAIN self
+# define ANGLE_APPLE_RELEASE self
+#endif
+
+#define ANGLE_APPLE_UNUSED __attribute__((unused))
+
+namespace angle
+{
+bool IsMetalRendererAvailable();
+}
+
+#endif
diff --git a/gfx/angle/checkout/src/common/bitset_utils.h b/gfx/angle/checkout/src/common/bitset_utils.h
new file mode 100644
index 0000000000..ee9a3f1b9b
--- /dev/null
+++ b/gfx/angle/checkout/src/common/bitset_utils.h
@@ -0,0 +1,1106 @@
+//
+// Copyright 2015 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// bitset_utils:
+// Bitset-related helper classes, such as a fast iterator to scan for set bits.
+//
+
+#ifndef COMMON_BITSETITERATOR_H_
+#define COMMON_BITSETITERATOR_H_
+
+#include <stdint.h>
+
+#include <array>
+
+#include "common/angleutils.h"
+#include "common/debug.h"
+#include "common/mathutil.h"
+#include "common/platform.h"
+
+namespace angle
+{
+// Given x, create 1 << x.
+template <typename BitsT, typename ParamT>
+constexpr BitsT Bit(ParamT x)
+{
+ // It's undefined behavior if the shift size is equal to or larger than the width of the type.
+ ASSERT(static_cast<size_t>(x) < sizeof(BitsT) * 8);
+
+ return (static_cast<BitsT>(1) << static_cast<size_t>(x));
+}
+
+// Given x, create (1 << x) - 1, i.e. a mask with x bits set.
+template <typename BitsT, typename ParamT>
+constexpr BitsT BitMask(ParamT x)
+{
+ if (static_cast<size_t>(x) == 0)
+ {
+ return 0;
+ }
+ return ((Bit<BitsT>(static_cast<ParamT>(static_cast<size_t>(x) - 1)) - 1) << 1) | 1;
+}
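+
+// Illustrative examples (not part of the original source):
+//   Bit<uint32_t>(3)     == 0x8  (only bit 3 set)
+//   BitMask<uint32_t>(3) == 0x7  (bits 0..2 set)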
+
+template <size_t N, typename BitsT, typename ParamT = std::size_t>
+class BitSetT final
+{
+ public:
+ class Reference final
+ {
+ public:
+ ~Reference() {}
+ Reference &operator=(bool x)
+ {
+ mParent->set(mBit, x);
+ return *this;
+ }
+ explicit operator bool() const { return mParent->test(mBit); }
+
+ private:
+ friend class BitSetT;
+
+ Reference(BitSetT *parent, ParamT bit) : mParent(parent), mBit(bit) {}
+
+ BitSetT *mParent;
+ ParamT mBit;
+ };
+
+ class Iterator final
+ {
+ public:
+ Iterator(const BitSetT &bits);
+ Iterator &operator++();
+
+ bool operator==(const Iterator &other) const;
+ bool operator!=(const Iterator &other) const;
+ ParamT operator*() const;
+
+ // These helper functions allow mutating an iterator in-flight.
+ // They only operate on later bits to ensure we don't iterate the same bit twice.
+ void resetLaterBit(std::size_t index)
+ {
+ ASSERT(index > mCurrentBit);
+ mBitsCopy.reset(index);
+ }
+
+ void setLaterBit(std::size_t index)
+ {
+ ASSERT(index > mCurrentBit);
+ mBitsCopy.set(index);
+ }
+
+ void setLaterBits(const BitSetT &bits)
+ {
+ ASSERT((BitSetT(bits) &= Mask(mCurrentBit + 1)).none());
+ mBitsCopy |= bits;
+ }
+
+ private:
+ std::size_t getNextBit();
+
+ BitSetT mBitsCopy;
+ std::size_t mCurrentBit;
+ };
+
+ using value_type = BitsT;
+ using param_type = ParamT;
+
+ constexpr BitSetT();
+ constexpr explicit BitSetT(BitsT value);
+ constexpr explicit BitSetT(std::initializer_list<ParamT> init);
+
+ constexpr BitSetT(const BitSetT &other);
+ constexpr BitSetT &operator=(const BitSetT &other);
+
+ constexpr bool operator==(const BitSetT &other) const;
+ constexpr bool operator!=(const BitSetT &other) const;
+
+ constexpr bool operator[](ParamT pos) const;
+ Reference operator[](ParamT pos) { return Reference(this, pos); }
+
+ constexpr bool test(ParamT pos) const;
+
+ constexpr bool all() const;
+ constexpr bool any() const;
+ constexpr bool none() const;
+ constexpr std::size_t count() const;
+
+ constexpr static std::size_t size() { return N; }
+
+ constexpr BitSetT &operator&=(const BitSetT &other);
+ constexpr BitSetT &operator|=(const BitSetT &other);
+ constexpr BitSetT &operator^=(const BitSetT &other);
+ constexpr BitSetT operator~() const;
+
+ constexpr BitSetT &operator&=(BitsT value);
+ constexpr BitSetT &operator|=(BitsT value);
+ constexpr BitSetT &operator^=(BitsT value);
+
+ constexpr BitSetT operator<<(std::size_t pos) const;
+ constexpr BitSetT &operator<<=(std::size_t pos);
+ constexpr BitSetT operator>>(std::size_t pos) const;
+ constexpr BitSetT &operator>>=(std::size_t pos);
+
+ constexpr BitSetT &set();
+ constexpr BitSetT &set(ParamT pos, bool value = true);
+
+ constexpr BitSetT &reset();
+ constexpr BitSetT &reset(ParamT pos);
+
+ constexpr BitSetT &flip();
+ constexpr BitSetT &flip(ParamT pos);
+
+ constexpr unsigned long to_ulong() const { return static_cast<unsigned long>(mBits); }
+ constexpr BitsT bits() const { return mBits; }
+
+ Iterator begin() const { return Iterator(*this); }
+ Iterator end() const { return Iterator(BitSetT()); }
+
+ constexpr static BitSetT Zero() { return BitSetT(); }
+
+ constexpr ParamT first() const;
+ constexpr ParamT last() const;
+
+ // Produces a mask of ones up to the "x"th bit.
+ constexpr static BitsT Mask(std::size_t x) { return BitMask<BitsT>(static_cast<ParamT>(x)); }
+
+ private:
+ BitsT mBits;
+};
+
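+// Usage sketch (illustrative, not part of the original source): the nested Iterator returns the
+// indices of set bits in ascending order, so a BitSetT can be scanned with a range-based for:
+//
+//   BitSet32<8> dirtyBits({1, 5});
+//   for (size_t bit : dirtyBits)
+//   {
+//       // visits bit == 1, then bit == 5
+//   }
+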
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT>::BitSetT() : mBits(0)
+{
+ static_assert(N > 0, "Bitset type cannot support zero bits.");
+ static_assert(N <= sizeof(BitsT) * 8, "Bitset type cannot support a size this large.");
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT>::BitSetT(BitsT value) : mBits(value & Mask(N))
+{}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT>::BitSetT(std::initializer_list<ParamT> init) : mBits(0)
+{
+ for (ParamT element : init)
+ {
+ mBits |= Bit<BitsT>(element);
+ }
+ ASSERT(mBits == (mBits & Mask(N)));
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT>::BitSetT(const BitSetT &other) : mBits(other.mBits)
+{}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator=(const BitSetT &other)
+{
+ mBits = other.mBits;
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr bool BitSetT<N, BitsT, ParamT>::operator==(const BitSetT &other) const
+{
+ return mBits == other.mBits;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr bool BitSetT<N, BitsT, ParamT>::operator!=(const BitSetT &other) const
+{
+ return mBits != other.mBits;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr bool BitSetT<N, BitsT, ParamT>::operator[](ParamT pos) const
+{
+ return test(pos);
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr bool BitSetT<N, BitsT, ParamT>::test(ParamT pos) const
+{
+ return (mBits & Bit<BitsT>(pos)) != 0;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr bool BitSetT<N, BitsT, ParamT>::all() const
+{
+ ASSERT(mBits == (mBits & Mask(N)));
+ return mBits == Mask(N);
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr bool BitSetT<N, BitsT, ParamT>::any() const
+{
+ ASSERT(mBits == (mBits & Mask(N)));
+ return (mBits != 0);
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr bool BitSetT<N, BitsT, ParamT>::none() const
+{
+ ASSERT(mBits == (mBits & Mask(N)));
+ return (mBits == 0);
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr std::size_t BitSetT<N, BitsT, ParamT>::count() const
+{
+ return gl::BitCount(mBits);
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator&=(const BitSetT &other)
+{
+ mBits &= other.mBits;
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator|=(const BitSetT &other)
+{
+ mBits |= other.mBits;
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator^=(const BitSetT &other)
+{
+ mBits = mBits ^ other.mBits;
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> BitSetT<N, BitsT, ParamT>::operator~() const
+{
+ return BitSetT<N, BitsT, ParamT>(~mBits & Mask(N));
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator&=(BitsT value)
+{
+ mBits &= value;
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator|=(BitsT value)
+{
+ mBits |= value;
+ ASSERT(mBits == (mBits & Mask(N)));
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator^=(BitsT value)
+{
+ mBits ^= value;
+ ASSERT(mBits == (mBits & Mask(N)));
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> BitSetT<N, BitsT, ParamT>::operator<<(std::size_t pos) const
+{
+ return BitSetT<N, BitsT, ParamT>((mBits << pos) & Mask(N));
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator<<=(std::size_t pos)
+{
+ mBits = (mBits << pos) & Mask(N);
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> BitSetT<N, BitsT, ParamT>::operator>>(std::size_t pos) const
+{
+ return BitSetT<N, BitsT, ParamT>(mBits >> pos);
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator>>=(std::size_t pos)
+{
+ mBits = (mBits >> pos) & Mask(N);
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::set()
+{
+ ASSERT(mBits == (mBits & Mask(N)));
+ mBits = Mask(N);
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::set(ParamT pos, bool value)
+{
+ ASSERT(static_cast<size_t>(pos) < N);
+ if (value)
+ {
+ mBits |= Bit<BitsT>(pos);
+ }
+ else
+ {
+ reset(pos);
+ }
+ ASSERT(mBits == (mBits & Mask(N)));
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::reset()
+{
+ ASSERT(mBits == (mBits & Mask(N)));
+ mBits = 0;
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::reset(ParamT pos)
+{
+ ASSERT(static_cast<size_t>(pos) < N);
+ ASSERT(mBits == (mBits & Mask(N)));
+ mBits &= ~Bit<BitsT>(pos);
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::flip()
+{
+ ASSERT(mBits == (mBits & Mask(N)));
+ mBits ^= Mask(N);
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::flip(ParamT pos)
+{
+ ASSERT(static_cast<size_t>(pos) < N);
+ mBits ^= Bit<BitsT>(pos);
+ ASSERT(mBits == (mBits & Mask(N)));
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr ParamT BitSetT<N, BitsT, ParamT>::first() const
+{
+ ASSERT(!none());
+ return static_cast<ParamT>(gl::ScanForward(mBits));
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+constexpr ParamT BitSetT<N, BitsT, ParamT>::last() const
+{
+ ASSERT(!none());
+ return static_cast<ParamT>(gl::ScanReverse(mBits));
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+BitSetT<N, BitsT, ParamT>::Iterator::Iterator(const BitSetT &bits) : mBitsCopy(bits), mCurrentBit(0)
+{
+ if (bits.any())
+ {
+ mCurrentBit = getNextBit();
+ }
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+ANGLE_INLINE typename BitSetT<N, BitsT, ParamT>::Iterator &
+BitSetT<N, BitsT, ParamT>::Iterator::operator++()
+{
+ ASSERT(mBitsCopy.any());
+ mBitsCopy.reset(static_cast<ParamT>(mCurrentBit));
+ mCurrentBit = getNextBit();
+ return *this;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+bool BitSetT<N, BitsT, ParamT>::Iterator::operator==(const Iterator &other) const
+{
+ return mBitsCopy == other.mBitsCopy;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+bool BitSetT<N, BitsT, ParamT>::Iterator::operator!=(const Iterator &other) const
+{
+ return !(*this == other);
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+ParamT BitSetT<N, BitsT, ParamT>::Iterator::operator*() const
+{
+ return static_cast<ParamT>(mCurrentBit);
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+std::size_t BitSetT<N, BitsT, ParamT>::Iterator::getNextBit()
+{
+ if (mBitsCopy.none())
+ {
+ return 0;
+ }
+
+ return gl::ScanForward(mBitsCopy.mBits);
+}
+
+template <size_t N>
+using BitSet8 = BitSetT<N, uint8_t>;
+
+template <size_t N>
+using BitSet16 = BitSetT<N, uint16_t>;
+
+template <size_t N>
+using BitSet32 = BitSetT<N, uint32_t>;
+
+template <size_t N>
+using BitSet64 = BitSetT<N, uint64_t>;
+
+template <std::size_t N>
+class BitSetArray;
+
+namespace priv
+{
+
+template <size_t N, typename T>
+using EnableIfBitsFit = typename std::enable_if<N <= sizeof(T) * 8>::type;
+
+template <size_t N, typename Enable = void>
+struct GetBitSet
+{
+ using Type = BitSetArray<N>;
+};
+
+// Prefer 64-bit bitsets on 64-bit CPUs. They seem faster than 32-bit.
+#if defined(ANGLE_IS_64_BIT_CPU)
+template <size_t N>
+struct GetBitSet<N, EnableIfBitsFit<N, uint64_t>>
+{
+ using Type = BitSet64<N>;
+};
+constexpr std::size_t kDefaultBitSetSize = 64;
+using BaseBitSetType = BitSet64<kDefaultBitSetSize>;
+#else
+template <size_t N>
+struct GetBitSet<N, EnableIfBitsFit<N, uint32_t>>
+{
+ using Type = BitSet32<N>;
+};
+constexpr std::size_t kDefaultBitSetSize = 32;
+using BaseBitSetType = BitSet32<kDefaultBitSetSize>;
+#endif // defined(ANGLE_IS_64_BIT_CPU)
+
+} // namespace priv
+
+template <size_t N>
+using BitSet = typename priv::GetBitSet<N>::Type;
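+
+// Illustrative examples (not part of the original source, assuming a 64-bit CPU): BitSet<8>
+// resolves to BitSet64<8>, which fits in a single word, while BitSet<100> falls back to
+// BitSetArray<100>.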
+
+template <std::size_t N>
+class BitSetArray final
+{
+ public:
+ using BaseBitSet = priv::BaseBitSetType;
+ using value_type = BaseBitSet::value_type;
+ using param_type = BaseBitSet::param_type;
+
+ constexpr BitSetArray();
+ constexpr explicit BitSetArray(std::initializer_list<param_type> init);
+
+ BitSetArray(const BitSetArray<N> &other);
+
+ class Reference final
+ {
+ public:
+ ~Reference() {}
+ Reference &operator=(bool x)
+ {
+ mParent.set(mPosition, x);
+ return *this;
+ }
+ explicit operator bool() const { return mParent.test(mPosition); }
+
+ private:
+ friend class BitSetArray;
+
+ Reference(BitSetArray &parent, std::size_t pos) : mParent(parent), mPosition(pos) {}
+
+ BitSetArray &mParent;
+ std::size_t mPosition;
+ };
+ class Iterator final
+ {
+ public:
+ Iterator(const BitSetArray<N> &bitSetArray, std::size_t index);
+ Iterator &operator++();
+ bool operator==(const Iterator &other) const;
+ bool operator!=(const Iterator &other) const;
+ size_t operator*() const;
+
+ // These helper functions allow mutating an iterator in-flight.
+ // They only operate on later bits to ensure we don't iterate the same bit twice.
+ void resetLaterBit(std::size_t pos)
+ {
+ ASSERT(pos > (mIndex * priv::kDefaultBitSetSize) + *mCurrentIterator);
+ prepareCopy();
+ mParentCopy.reset(pos);
+ updateIteratorBit(pos, false);
+ }
+
+ void setLaterBit(std::size_t pos)
+ {
+ ASSERT(pos > (mIndex * priv::kDefaultBitSetSize) + *mCurrentIterator);
+ prepareCopy();
+ mParentCopy.set(pos);
+ updateIteratorBit(pos, true);
+ }
+
+ void setLaterBits(const BitSetArray &bits)
+ {
+ prepareCopy();
+ mParentCopy |= bits;
+ updateIteratorBits(bits);
+ }
+
+ private:
+ ANGLE_INLINE void prepareCopy()
+ {
+ ASSERT(mParent.mBaseBitSetArray[mIndex].end() ==
+ mParentCopy.mBaseBitSetArray[mIndex].end());
+ if (mParentCopy.none())
+ {
+ mParentCopy = mParent;
+ mCurrentParent = &mParentCopy;
+ }
+ }
+
+ ANGLE_INLINE void updateIteratorBit(std::size_t pos, bool setBit)
+ {
+ // Get the index and offset, update the current iterator if within range
+ size_t index = pos >> kShiftForDivision;
+ size_t offset = pos & kDefaultBitSetSizeMinusOne;
+ if (index == mIndex)
+ {
+ if (setBit)
+ {
+ mCurrentIterator.setLaterBit(offset);
+ }
+ else
+ {
+ mCurrentIterator.resetLaterBit(offset);
+ }
+ }
+ }
+
+ ANGLE_INLINE void updateIteratorBits(const BitSetArray &bits)
+ {
+ mCurrentIterator.setLaterBits(bits.mBaseBitSetArray[mIndex]);
+ }
+
+ // Problem -
+ // We want to provide the fastest path possible for use cases that iterate through the bitset.
+ //
+ // Options -
+ // 1) For non-mutating iterations the const ref <mParent> is set as mCurrentParent and only
+ // for use cases that need to mutate the bitset while iterating do we perform a copy of
+ // <mParent> into <mParentCopy> and modify its bits accordingly.
+ // 2) The alternate approach was to perform a copy all the time in the constructor
+ // irrespective of whether it was a mutating use case or not.
+ //
+ // Experiment -
+ // BitSetIteratorPerfTest was run on a Windows machine with Intel CPU and these were the
+ // results -
+ // 1) Copy only when necessary -
+ // RESULT BitSetIteratorPerf.wall_time: run = 116.1067374961 ns
+ // RESULT BitSetIteratorPerf.trial_steps : run = 8416124 count
+ // RESULT BitSetIteratorPerf.total_steps : run = 16832251 count
+ // 2) Copy always -
+ // RESULT BitSetIteratorPerf.wall_time: run = 242.7446459439 ns
+ // RESULT BitSetIteratorPerf.trial_steps : run = 4171416 count
+ // RESULT BitSetIteratorPerf.total_steps : run = 8342834 count
+ //
+ // Resolution -
+ // We settled on the copy only when necessary path.
+ size_t mIndex;
+ const BitSetArray &mParent;
+ BitSetArray mParentCopy;
+ const BitSetArray *mCurrentParent;
+ typename BaseBitSet::Iterator mCurrentIterator;
+ };
+
+ constexpr static std::size_t size() { return N; }
+ Iterator begin() const { return Iterator(*this, 0); }
+ Iterator end() const { return Iterator(*this, kArraySize); }
+ constexpr unsigned long to_ulong() const
+ {
+ // TODO(anglebug.com/5628): Handle serializing more than kDefaultBitSetSize
+ for (std::size_t index = 1; index < kArraySize; index++)
+ {
+ ASSERT(mBaseBitSetArray[index].none());
+ }
+ return static_cast<unsigned long>(mBaseBitSetArray[0].to_ulong());
+ }
+
+ // Assignment operators
+ constexpr BitSetArray &operator=(const BitSetArray &other);
+ constexpr BitSetArray &operator&=(const BitSetArray &other);
+ constexpr BitSetArray &operator|=(const BitSetArray &other);
+ constexpr BitSetArray &operator^=(const BitSetArray &other);
+
+ // Bitwise operators
+ constexpr BitSetArray<N> operator&(const angle::BitSetArray<N> &other) const;
+ constexpr BitSetArray<N> operator|(const angle::BitSetArray<N> &other) const;
+ constexpr BitSetArray<N> operator^(const angle::BitSetArray<N> &other) const;
+
+ // Relational Operators
+ constexpr bool operator==(const angle::BitSetArray<N> &other) const;
+ constexpr bool operator!=(const angle::BitSetArray<N> &other) const;
+
+ // Unary operators
+ constexpr BitSetArray operator~() const;
+ constexpr bool operator[](std::size_t pos) const;
+ constexpr Reference operator[](std::size_t pos)
+ {
+ ASSERT(pos < size());
+ return Reference(*this, pos);
+ }
+
+ // Setter, getters and other helper methods
+ constexpr BitSetArray &set();
+ constexpr BitSetArray &set(std::size_t pos, bool value = true);
+ constexpr BitSetArray &reset();
+ constexpr BitSetArray &reset(std::size_t pos);
+ constexpr bool test(std::size_t pos) const;
+ constexpr bool all() const;
+ constexpr bool any() const;
+ constexpr bool none() const;
+ constexpr std::size_t count() const;
+ constexpr bool intersects(const BitSetArray &other) const;
+ constexpr BitSetArray<N> &flip();
+ constexpr param_type first() const;
+ constexpr param_type last() const;
+
+ constexpr value_type bits(size_t index) const;
+
+ private:
+ static constexpr std::size_t kDefaultBitSetSizeMinusOne = priv::kDefaultBitSetSize - 1;
+ static constexpr std::size_t kShiftForDivision =
+ static_cast<std::size_t>(rx::Log2(static_cast<unsigned int>(priv::kDefaultBitSetSize)));
+ static constexpr std::size_t kArraySize =
+ ((N + kDefaultBitSetSizeMinusOne) >> kShiftForDivision);
+ constexpr static std::size_t kLastElementCount = (N & kDefaultBitSetSizeMinusOne);
+ constexpr static std::size_t kLastElementMask = priv::BaseBitSetType::Mask(
+ kLastElementCount == 0 ? priv::kDefaultBitSetSize : kLastElementCount);
+
+ std::array<BaseBitSet, kArraySize> mBaseBitSetArray;
+};
+
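+// Usage sketch (illustrative, not part of the original source): BitSetArray spreads its bits
+// across multiple BaseBitSet words, so positions past priv::kDefaultBitSetSize behave like any
+// other position:
+//
+//   BitSetArray<100> flags;
+//   flags.set(75);
+//   ASSERT(flags.test(75) && flags.count() == 1);
+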
+template <std::size_t N>
+constexpr BitSetArray<N>::BitSetArray()
+{
+ static_assert(N > priv::kDefaultBitSetSize, "BitSetArray type can't support requested size.");
+ reset();
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N>::BitSetArray(std::initializer_list<param_type> init)
+{
+ reset();
+
+ for (param_type element : init)
+ {
+ size_t index = element >> kShiftForDivision;
+ size_t offset = element & kDefaultBitSetSizeMinusOne;
+ mBaseBitSetArray[index].set(offset, true);
+ }
+}
+
+template <size_t N>
+BitSetArray<N>::BitSetArray(const BitSetArray<N> &other)
+{
+ for (std::size_t index = 0; index < kArraySize; index++)
+ {
+ mBaseBitSetArray[index] = other.mBaseBitSetArray[index];
+ }
+}
+
+template <size_t N>
+BitSetArray<N>::Iterator::Iterator(const BitSetArray<N> &bitSetArray, std::size_t index)
+ : mIndex(index),
+ mParent(bitSetArray),
+ mCurrentParent(&mParent),
+ mCurrentIterator(mParent.mBaseBitSetArray[0].begin())
+{
+ while (mIndex < mCurrentParent->kArraySize)
+ {
+ if (mCurrentParent->mBaseBitSetArray[mIndex].any())
+ {
+ break;
+ }
+ mIndex++;
+ }
+
+ if (mIndex < mCurrentParent->kArraySize)
+ {
+ mCurrentIterator = mCurrentParent->mBaseBitSetArray[mIndex].begin();
+ }
+ else
+ {
+ mCurrentIterator = mCurrentParent->mBaseBitSetArray[mCurrentParent->kArraySize - 1].end();
+ }
+}
+
+template <std::size_t N>
+typename BitSetArray<N>::Iterator &BitSetArray<N>::Iterator::operator++()
+{
+ ++mCurrentIterator;
+ while (mCurrentIterator == mCurrentParent->mBaseBitSetArray[mIndex].end())
+ {
+ mIndex++;
+ if (mIndex >= mCurrentParent->kArraySize)
+ {
+ break;
+ }
+ mCurrentIterator = mCurrentParent->mBaseBitSetArray[mIndex].begin();
+ }
+ return *this;
+}
+
+template <std::size_t N>
+bool BitSetArray<N>::Iterator::operator==(const BitSetArray<N>::Iterator &other) const
+{
+ return mCurrentIterator == other.mCurrentIterator;
+}
+
+template <std::size_t N>
+bool BitSetArray<N>::Iterator::operator!=(const BitSetArray<N>::Iterator &other) const
+{
+ return mCurrentIterator != other.mCurrentIterator;
+}
+
+template <std::size_t N>
+std::size_t BitSetArray<N>::Iterator::operator*() const
+{
+ return (mIndex * priv::kDefaultBitSetSize) + *mCurrentIterator;
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> &BitSetArray<N>::operator=(const BitSetArray<N> &other)
+{
+ for (std::size_t index = 0; index < kArraySize; index++)
+ {
+ mBaseBitSetArray[index] = other.mBaseBitSetArray[index];
+ }
+ return *this;
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> &BitSetArray<N>::operator&=(const BitSetArray<N> &other)
+{
+ for (std::size_t index = 0; index < kArraySize; index++)
+ {
+ mBaseBitSetArray[index] &= other.mBaseBitSetArray[index];
+ }
+ return *this;
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> &BitSetArray<N>::operator|=(const BitSetArray<N> &other)
+{
+ for (std::size_t index = 0; index < kArraySize; index++)
+ {
+ mBaseBitSetArray[index] |= other.mBaseBitSetArray[index];
+ }
+ return *this;
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> &BitSetArray<N>::operator^=(const BitSetArray<N> &other)
+{
+ for (std::size_t index = 0; index < kArraySize; index++)
+ {
+ mBaseBitSetArray[index] ^= other.mBaseBitSetArray[index];
+ }
+ return *this;
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> BitSetArray<N>::operator&(const angle::BitSetArray<N> &other) const
+{
+ angle::BitSetArray<N> result(other);
+ result &= *this;
+ return result;
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> BitSetArray<N>::operator|(const angle::BitSetArray<N> &other) const
+{
+ angle::BitSetArray<N> result(other);
+ result |= *this;
+ return result;
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> BitSetArray<N>::operator^(const angle::BitSetArray<N> &other) const
+{
+ angle::BitSetArray<N> result(other);
+ result ^= *this;
+ return result;
+}
+
+template <std::size_t N>
+constexpr bool BitSetArray<N>::operator==(const angle::BitSetArray<N> &other) const
+{
+ for (std::size_t index = 0; index < kArraySize; index++)
+ {
+ if (mBaseBitSetArray[index] != other.mBaseBitSetArray[index])
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <std::size_t N>
+constexpr bool BitSetArray<N>::operator!=(const angle::BitSetArray<N> &other) const
+{
+ return !(*this == other);
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> BitSetArray<N>::operator~() const
+{
+ angle::BitSetArray<N> result;
+ for (std::size_t index = 0; index < kArraySize; index++)
+ {
+ result.mBaseBitSetArray[index] |= ~mBaseBitSetArray[index];
+ }
+ // The last element in result may need special handling
+ result.mBaseBitSetArray[kArraySize - 1] &= kLastElementMask;
+
+ return result;
+}
+
+template <std::size_t N>
+constexpr bool BitSetArray<N>::operator[](std::size_t pos) const
+{
+ ASSERT(pos < size());
+ return test(pos);
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> &BitSetArray<N>::set()
+{
+ for (BaseBitSet &baseBitSet : mBaseBitSetArray)
+ {
+ baseBitSet.set();
+ }
+ // The last element in mBaseBitSetArray may need special handling
+ mBaseBitSetArray[kArraySize - 1] &= kLastElementMask;
+
+ return *this;
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> &BitSetArray<N>::set(std::size_t pos, bool value)
+{
+ ASSERT(pos < size());
+ // Get the index and offset, then set the bit
+ size_t index = pos >> kShiftForDivision;
+ size_t offset = pos & kDefaultBitSetSizeMinusOne;
+ mBaseBitSetArray[index].set(offset, value);
+ return *this;
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> &BitSetArray<N>::reset()
+{
+ for (BaseBitSet &baseBitSet : mBaseBitSetArray)
+ {
+ baseBitSet.reset();
+ }
+ return *this;
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> &BitSetArray<N>::reset(std::size_t pos)
+{
+ ASSERT(pos < size());
+ return set(pos, false);
+}
+
+template <std::size_t N>
+constexpr bool BitSetArray<N>::test(std::size_t pos) const
+{
+ ASSERT(pos < size());
+ // Get the index and offset, then test the bit
+ size_t index = pos >> kShiftForDivision;
+ size_t offset = pos & kDefaultBitSetSizeMinusOne;
+ return mBaseBitSetArray[index].test(offset);
+}
+
+template <std::size_t N>
+constexpr bool BitSetArray<N>::all() const
+{
+ constexpr priv::BaseBitSetType kLastElementBitSet = priv::BaseBitSetType(kLastElementMask);
+
+ for (std::size_t index = 0; index < kArraySize - 1; index++)
+ {
+ if (!mBaseBitSetArray[index].all())
+ {
+ return false;
+ }
+ }
+
+ // The last element in mBaseBitSetArray may need special handling
+ return mBaseBitSetArray[kArraySize - 1] == kLastElementBitSet;
+}
+
+template <std::size_t N>
+constexpr bool BitSetArray<N>::any() const
+{
+ for (const BaseBitSet &baseBitSet : mBaseBitSetArray)
+ {
+ if (baseBitSet.any())
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+template <std::size_t N>
+constexpr bool BitSetArray<N>::none() const
+{
+ for (const BaseBitSet &baseBitSet : mBaseBitSetArray)
+ {
+ if (!baseBitSet.none())
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <std::size_t N>
+constexpr std::size_t BitSetArray<N>::count() const
+{
+ size_t count = 0;
+ for (const BaseBitSet &baseBitSet : mBaseBitSetArray)
+ {
+ count += baseBitSet.count();
+ }
+ return count;
+}
+
+template <std::size_t N>
+constexpr bool BitSetArray<N>::intersects(const BitSetArray<N> &other) const
+{
+ for (std::size_t index = 0; index < kArraySize; index++)
+ {
+ if ((mBaseBitSetArray[index].bits() & other.mBaseBitSetArray[index].bits()) != 0)
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+template <std::size_t N>
+constexpr BitSetArray<N> &BitSetArray<N>::flip()
+{
+ for (BaseBitSet &baseBitSet : mBaseBitSetArray)
+ {
+ baseBitSet.flip();
+ }
+
+ // The last element in mBaseBitSetArray may need special handling
+ mBaseBitSetArray[kArraySize - 1] &= kLastElementMask;
+ return *this;
+}
+
+template <std::size_t N>
+constexpr typename BitSetArray<N>::param_type BitSetArray<N>::first() const
+{
+ ASSERT(any());
+ for (size_t arrayIndex = 0; arrayIndex < kArraySize; ++arrayIndex)
+ {
+ const BaseBitSet &baseBitSet = mBaseBitSetArray[arrayIndex];
+ if (baseBitSet.any())
+ {
+ return baseBitSet.first() + arrayIndex * priv::kDefaultBitSetSize;
+ }
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+template <std::size_t N>
+constexpr typename BitSetArray<N>::param_type BitSetArray<N>::last() const
+{
+ ASSERT(any());
+ for (size_t arrayIndex = kArraySize; arrayIndex > 0; --arrayIndex)
+ {
+ const BaseBitSet &baseBitSet = mBaseBitSetArray[arrayIndex - 1];
+ if (baseBitSet.any())
+ {
+ return baseBitSet.last() + (arrayIndex - 1) * priv::kDefaultBitSetSize;
+ }
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+template <std::size_t N>
+constexpr typename BitSetArray<N>::value_type BitSetArray<N>::bits(size_t index) const
+{
+ return mBaseBitSetArray[index].bits();
+}
+} // namespace angle
+
+template <size_t N, typename BitsT, typename ParamT>
+inline constexpr angle::BitSetT<N, BitsT, ParamT> operator&(
+ const angle::BitSetT<N, BitsT, ParamT> &lhs,
+ const angle::BitSetT<N, BitsT, ParamT> &rhs)
+{
+ angle::BitSetT<N, BitsT, ParamT> result(lhs);
+ result &= rhs.bits();
+ return result;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+inline constexpr angle::BitSetT<N, BitsT, ParamT> operator|(
+ const angle::BitSetT<N, BitsT, ParamT> &lhs,
+ const angle::BitSetT<N, BitsT, ParamT> &rhs)
+{
+ angle::BitSetT<N, BitsT, ParamT> result(lhs);
+ result |= rhs.bits();
+ return result;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+inline constexpr angle::BitSetT<N, BitsT, ParamT> operator^(
+ const angle::BitSetT<N, BitsT, ParamT> &lhs,
+ const angle::BitSetT<N, BitsT, ParamT> &rhs)
+{
+ angle::BitSetT<N, BitsT, ParamT> result(lhs);
+ result ^= rhs.bits();
+ return result;
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+inline bool operator==(angle::BitSetT<N, BitsT, ParamT> &lhs, angle::BitSetT<N, BitsT, ParamT> &rhs)
+{
+ return lhs.bits() == rhs.bits();
+}
+
+template <size_t N, typename BitsT, typename ParamT>
+inline bool operator!=(angle::BitSetT<N, BitsT, ParamT> &lhs, angle::BitSetT<N, BitsT, ParamT> &rhs)
+{
+ return !(lhs == rhs);
+}
+
+#endif // COMMON_BITSETITERATOR_H_
diff --git a/gfx/angle/checkout/src/common/debug.cpp b/gfx/angle/checkout/src/common/debug.cpp
new file mode 100644
index 0000000000..23424d443a
--- /dev/null
+++ b/gfx/angle/checkout/src/common/debug.cpp
@@ -0,0 +1,349 @@
+//
+// Copyright 2002 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// debug.cpp: Debugging utilities.
+
+#include "common/debug.h"
+
+#include <stdarg.h>
+
+#include <array>
+#include <cstdio>
+#include <cstring>
+#include <fstream>
+#include <ostream>
+#include <vector>
+
+#if defined(ANGLE_PLATFORM_ANDROID)
+# include <android/log.h>
+#endif
+
+#if defined(ANGLE_PLATFORM_APPLE)
+# include <os/log.h>
+#endif
+
+#if defined(ANGLE_PLATFORM_WINDOWS)
+# include <windows.h>
+#endif
+
+#include "anglebase/no_destructor.h"
+#include "common/Optional.h"
+#include "common/angleutils.h"
+#include "common/entry_points_enum_autogen.h"
+#include "common/system_utils.h"
+
+namespace gl
+{
+
+namespace
+{
+
+DebugAnnotator *g_debugAnnotator = nullptr;
+
+std::mutex *g_debugMutex = nullptr;
+
+constexpr std::array<const char *, LOG_NUM_SEVERITIES> g_logSeverityNames = {
+ {"EVENT", "INFO", "WARN", "ERR", "FATAL"}};
+
+constexpr const char *LogSeverityName(int severity)
+{
+ return (severity >= 0 && severity < LOG_NUM_SEVERITIES) ? g_logSeverityNames[severity]
+ : "UNKNOWN";
+}
+
+bool ShouldCreateLogMessage(LogSeverity severity)
+{
+#if defined(ANGLE_TRACE_ENABLED)
+ return true;
+#elif defined(ANGLE_ENABLE_ASSERTS)
+ return severity == LOG_FATAL || severity == LOG_ERR || severity == LOG_WARN;
+#else
+ return severity == LOG_FATAL || severity == LOG_ERR;
+#endif
+}
+
+} // namespace
+
+namespace priv
+{
+
+bool ShouldCreatePlatformLogMessage(LogSeverity severity)
+{
+#if defined(ANGLE_TRACE_ENABLED)
+ return true;
+#else
+ return severity != LOG_EVENT;
+#endif
+}
+
+// This is never instantiated; it just gives EAT_STREAM_PARAMETERS an object of the correct type
+// on the LHS of the unused part of the ternary operator.
+std::ostream *gSwallowStream;
+} // namespace priv
+
+bool DebugAnnotationsActive(const gl::Context *context)
+{
+#if defined(ANGLE_ENABLE_DEBUG_ANNOTATIONS) || defined(ANGLE_ENABLE_DEBUG_TRACE)
+ return g_debugAnnotator != nullptr && g_debugAnnotator->getStatus(context);
+#else
+ return false;
+#endif
+}
+
+bool ShouldBeginScopedEvent(const gl::Context *context)
+{
+#if defined(ANGLE_ENABLE_ANNOTATOR_RUN_TIME_CHECKS)
+ return DebugAnnotationsActive(context);
+#else
+ return true;
+#endif // defined(ANGLE_ENABLE_ANNOTATOR_RUN_TIME_CHECKS)
+}
+
+bool DebugAnnotationsInitialized()
+{
+ return g_debugAnnotator != nullptr;
+}
+
+void InitializeDebugAnnotations(DebugAnnotator *debugAnnotator)
+{
+ UninitializeDebugAnnotations();
+ g_debugAnnotator = debugAnnotator;
+}
+
+void UninitializeDebugAnnotations()
+{
+ // Pointer is not managed.
+ g_debugAnnotator = nullptr;
+}
+
+void InitializeDebugMutexIfNeeded()
+{
+ if (g_debugMutex == nullptr)
+ {
+ g_debugMutex = new std::mutex();
+ }
+}
+
+std::mutex &GetDebugMutex()
+{
+ ASSERT(g_debugMutex);
+ return *g_debugMutex;
+}
+
+ScopedPerfEventHelper::ScopedPerfEventHelper(gl::Context *context, angle::EntryPoint entryPoint)
+ : mContext(context), mEntryPoint(entryPoint), mFunctionName(nullptr), mCalledBeginEvent(false)
+{}
+
+ScopedPerfEventHelper::~ScopedPerfEventHelper()
+{
+ // EGL_Initialize() and EGL_Terminate() can change g_debugAnnotator. Must check the value of
+ // g_debugAnnotator and whether ScopedPerfEventHelper::begin() initiated a begin event that must be
+ // ended now.
+ if (DebugAnnotationsInitialized() && mCalledBeginEvent)
+ {
+ g_debugAnnotator->endEvent(mContext, mFunctionName, mEntryPoint);
+ }
+}
+
+void ScopedPerfEventHelper::begin(const char *format, ...)
+{
+ mFunctionName = GetEntryPointName(mEntryPoint);
+
+ va_list vararg;
+ va_start(vararg, format);
+
+ std::vector<char> buffer;
+ size_t len = FormatStringIntoVector(format, vararg, buffer);
+ va_end(vararg);
+
+ ANGLE_LOG(EVENT) << std::string(&buffer[0], len);
+ if (DebugAnnotationsInitialized())
+ {
+ mCalledBeginEvent = true;
+ g_debugAnnotator->beginEvent(mContext, mEntryPoint, mFunctionName, buffer.data());
+ }
+}
+
+LogMessage::LogMessage(const char *file, const char *function, int line, LogSeverity severity)
+ : mFile(file), mFunction(function), mLine(line), mSeverity(severity)
+{
+ // INFO() and EVENT() do not require additional function(line) info.
+ if (mSeverity > LOG_INFO)
+ {
+ const char *slash = std::max(strrchr(mFile, '/'), strrchr(mFile, '\\'));
+ mStream << (slash ? (slash + 1) : mFile) << ":" << mLine << " (" << mFunction << "): ";
+ }
+}
+
+LogMessage::~LogMessage()
+{
+ {
+ std::unique_lock<std::mutex> lock;
+ if (g_debugMutex != nullptr)
+ {
+ lock = std::unique_lock<std::mutex>(*g_debugMutex);
+ }
+
+ if (DebugAnnotationsInitialized() && (mSeverity > LOG_INFO))
+ {
+ g_debugAnnotator->logMessage(*this);
+ }
+ else
+ {
+ Trace(getSeverity(), getMessage().c_str());
+ }
+ }
+
+ if (mSeverity == LOG_FATAL)
+ {
+ if (angle::IsDebuggerAttached())
+ {
+ angle::BreakDebugger();
+ }
+ else
+ {
+ ANGLE_CRASH();
+ }
+ }
+}
+
+void Trace(LogSeverity severity, const char *message)
+{
+ if (!ShouldCreateLogMessage(severity))
+ {
+ return;
+ }
+
+ std::string str(message);
+
+ if (DebugAnnotationsActive(/*context=*/nullptr))
+ {
+
+ switch (severity)
+ {
+ case LOG_EVENT:
+ // Debug logging is done in ScopedPerfEventHelper
+ break;
+ default:
+ g_debugAnnotator->setMarker(/*context=*/nullptr, message);
+ break;
+ }
+ }
+
+ if (severity == LOG_FATAL || severity == LOG_ERR || severity == LOG_WARN ||
+#if defined(ANGLE_ENABLE_TRACE_ANDROID_LOGCAT) || defined(ANGLE_ENABLE_TRACE_EVENTS)
+ severity == LOG_EVENT ||
+#endif
+ severity == LOG_INFO)
+ {
+#if defined(ANGLE_PLATFORM_ANDROID)
+ android_LogPriority android_priority = ANDROID_LOG_ERROR;
+ switch (severity)
+ {
+ case LOG_INFO:
+ case LOG_EVENT:
+ android_priority = ANDROID_LOG_INFO;
+ break;
+ case LOG_WARN:
+ android_priority = ANDROID_LOG_WARN;
+ break;
+ case LOG_ERR:
+ android_priority = ANDROID_LOG_ERROR;
+ break;
+ case LOG_FATAL:
+ android_priority = ANDROID_LOG_FATAL;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __android_log_print(android_priority, "ANGLE", "%s: %s\n", LogSeverityName(severity),
+ str.c_str());
+#elif defined(ANGLE_PLATFORM_APPLE)
+ if (__builtin_available(macOS 10.12, iOS 10.0, *))
+ {
+ os_log_type_t apple_log_type = OS_LOG_TYPE_DEFAULT;
+ switch (severity)
+ {
+ case LOG_INFO:
+ apple_log_type = OS_LOG_TYPE_INFO;
+ break;
+ case LOG_WARN:
+ apple_log_type = OS_LOG_TYPE_DEFAULT;
+ break;
+ case LOG_ERR:
+ apple_log_type = OS_LOG_TYPE_ERROR;
+ break;
+ case LOG_FATAL:
+ // OS_LOG_TYPE_FAULT is too severe - grabs the entire process tree.
+ apple_log_type = OS_LOG_TYPE_ERROR;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ os_log_with_type(OS_LOG_DEFAULT, apple_log_type, "ANGLE: %s: %s\n",
+ LogSeverityName(severity), str.c_str());
+ }
+#else
+ // Note: we use fprintf because <iostream> includes static initializers.
+ fprintf((severity >= LOG_WARN) ? stderr : stdout, "%s: %s\n", LogSeverityName(severity),
+ str.c_str());
+#endif
+ }
+
+#if defined(ANGLE_PLATFORM_WINDOWS) && \
+ (defined(ANGLE_ENABLE_DEBUG_TRACE_TO_DEBUGGER) || !defined(NDEBUG))
+# if !defined(ANGLE_ENABLE_DEBUG_TRACE_TO_DEBUGGER)
+ if (severity >= LOG_ERR)
+# endif // !defined(ANGLE_ENABLE_DEBUG_TRACE_TO_DEBUGGER)
+ {
+ OutputDebugStringA(str.c_str());
+ OutputDebugStringA("\n");
+ }
+#endif
+
+#if defined(ANGLE_ENABLE_DEBUG_TRACE)
+# if defined(NDEBUG)
+ if (severity == LOG_EVENT || severity == LOG_WARN || severity == LOG_INFO)
+ {
+ return;
+ }
+# endif // defined(NDEBUG)
+ static angle::base::NoDestructor<std::ofstream> file(TRACE_OUTPUT_FILE, std::ofstream::app);
+ if (file->good())
+ {
+ if (severity > LOG_EVENT)
+ {
+ *file << LogSeverityName(severity) << ": ";
+ }
+ *file << str << "\n";
+ file->flush();
+ }
+#endif // defined(ANGLE_ENABLE_DEBUG_TRACE)
+}
+
+LogSeverity LogMessage::getSeverity() const
+{
+ return mSeverity;
+}
+
+std::string LogMessage::getMessage() const
+{
+ return mStream.str();
+}
+
+#if defined(ANGLE_PLATFORM_WINDOWS)
+priv::FmtHexHelper<HRESULT, char> FmtHR(HRESULT value)
+{
+ return priv::FmtHexHelper<HRESULT, char>("HRESULT: ", value);
+}
+
+priv::FmtHexHelper<DWORD, char> FmtErr(DWORD value)
+{
+ return priv::FmtHexHelper<DWORD, char>("error: ", value);
+}
+#endif // defined(ANGLE_PLATFORM_WINDOWS)
+
+} // namespace gl
diff --git a/gfx/angle/checkout/src/common/debug.h b/gfx/angle/checkout/src/common/debug.h
new file mode 100644
index 0000000000..a9ee795103
--- /dev/null
+++ b/gfx/angle/checkout/src/common/debug.h
@@ -0,0 +1,468 @@
+//
+// Copyright 2002 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// debug.h: Debugging utilities. A lot of the logging code is adapted from Chromium's
+// base/logging.h.
+
+#ifndef COMMON_DEBUG_H_
+#define COMMON_DEBUG_H_
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <iomanip>
+#include <ios>
+#include <mutex>
+#include <sstream>
+#include <string>
+
+#include "common/angleutils.h"
+#include "common/entry_points_enum_autogen.h"
+#include "common/platform.h"
+
+#if defined(ANGLE_PLATFORM_WINDOWS)
+# include <sal.h>
+typedef unsigned long DWORD;
+typedef _Return_type_success_(return >= 0) long HRESULT;
+#endif
+
+#if !defined(TRACE_OUTPUT_FILE)
+# define TRACE_OUTPUT_FILE "angle_debug.txt"
+#endif
+
+namespace gl
+{
+class Context;
+
+// Pairs a begin event with an end event.
+class [[nodiscard]] ScopedPerfEventHelper : angle::NonCopyable
+{
+ public:
+ ScopedPerfEventHelper(Context *context, angle::EntryPoint entryPoint);
+ ~ScopedPerfEventHelper();
+ ANGLE_FORMAT_PRINTF(2, 3)
+ void begin(const char *format, ...);
+
+ private:
+ gl::Context *mContext;
+ const angle::EntryPoint mEntryPoint;
+ const char *mFunctionName;
+ bool mCalledBeginEvent;
+};
+
+using LogSeverity = int;
+// Note: the log severities are used to index into the array of names,
+// see g_logSeverityNames.
+constexpr LogSeverity LOG_EVENT = 0;
+constexpr LogSeverity LOG_INFO = 1;
+constexpr LogSeverity LOG_WARN = 2;
+constexpr LogSeverity LOG_ERR = 3;
+constexpr LogSeverity LOG_FATAL = 4;
+constexpr LogSeverity LOG_NUM_SEVERITIES = 5;
+
+void Trace(LogSeverity severity, const char *message);
+
+// This class more or less represents a particular log message. You
+// create an instance of LogMessage and then stream stuff to it.
+// When you finish streaming to it, ~LogMessage is called and the
+// full message gets streamed to the appropriate destination.
+//
+// You shouldn't actually use LogMessage's constructor to log things,
+// though. You should use the ERR() and WARN() macros.
+class LogMessage : angle::NonCopyable
+{
+ public:
+ // Used for ANGLE_LOG(severity).
+ LogMessage(const char *file, const char *function, int line, LogSeverity severity);
+ ~LogMessage();
+ std::ostream &stream() { return mStream; }
+
+ LogSeverity getSeverity() const;
+ std::string getMessage() const;
+
+ private:
+ const char *mFile;
+ const char *mFunction;
+ const int mLine;
+ const LogSeverity mSeverity;
+
+ std::ostringstream mStream;
+};
+
+// Wraps the API/Platform-specific debug annotation functions.
+// Also handles redirecting logging destination.
+class DebugAnnotator : angle::NonCopyable
+{
+ public:
+ DebugAnnotator() {}
+ virtual ~DebugAnnotator() {}
+ virtual void beginEvent(gl::Context *context,
+ angle::EntryPoint entryPoint,
+ const char *eventName,
+ const char *eventMessage) = 0;
+ virtual void endEvent(gl::Context *context,
+ const char *eventName,
+ angle::EntryPoint entryPoint) = 0;
+ virtual void setMarker(gl::Context *context, const char *markerName) = 0;
+ virtual bool getStatus(const gl::Context *context) = 0;
+ // Log Message Handler that gets passed every log message,
+ // when debug annotations are initialized,
+ // replacing default handling by LogMessage.
+ virtual void logMessage(const LogMessage &msg) const = 0;
+};
+
+bool ShouldBeginScopedEvent(const gl::Context *context);
+void InitializeDebugAnnotations(DebugAnnotator *debugAnnotator);
+void UninitializeDebugAnnotations();
+bool DebugAnnotationsActive(const gl::Context *context);
+bool DebugAnnotationsInitialized();
+
+void InitializeDebugMutexIfNeeded();
+
+std::mutex &GetDebugMutex();
+
+namespace priv
+{
+// This class is used to explicitly ignore values in the conditional logging macros. This avoids
+// compiler warnings like "value computed is not used" and "statement has no effect".
+class LogMessageVoidify
+{
+ public:
+ LogMessageVoidify() {}
+ // This has to be an operator with a precedence lower than << but higher than ?:
+ void operator&(std::ostream &) {}
+};
+
+extern std::ostream *gSwallowStream;
+
+// Used by ANGLE_LOG_IS_ON to lazy-evaluate stream arguments.
+bool ShouldCreatePlatformLogMessage(LogSeverity severity);
+
+// N is the width of the output to the stream. The output is padded with zeros
+// if value is less than N characters.
+// S is the stream type, either ostream for ANSI or wostream for wide character.
+// T is the type of the value to output to the stream.
+// C is the type of characters - either char for ANSI or wchar_t for wide char.
+template <int N, typename S, typename T, typename C>
+S &FmtHex(S &stream, T value, const C *zeroX, C zero)
+{
+ stream << zeroX;
+
+ std::ios_base::fmtflags oldFlags = stream.flags();
+ std::streamsize oldWidth = stream.width();
+ typename S::char_type oldFill = stream.fill();
+
+ stream << std::hex << std::uppercase << std::setw(N) << std::setfill(zero) << value;
+
+ stream.flags(oldFlags);
+ stream.width(oldWidth);
+ stream.fill(oldFill);
+
+ return stream;
+}
+
+template <typename S, typename T, typename C>
+S &FmtHexAutoSized(S &stream, T value, const C *prefix, const C *zeroX, C zero)
+{
+ if (prefix)
+ {
+ stream << prefix;
+ }
+
+ constexpr int N = sizeof(T) * 2;
+ return priv::FmtHex<N>(stream, value, zeroX, zero);
+}
+
+template <typename T, typename C>
+class FmtHexHelper
+{
+ public:
+ FmtHexHelper(const C *prefix, T value) : mPrefix(prefix), mValue(value) {}
+ explicit FmtHexHelper(T value) : mPrefix(nullptr), mValue(value) {}
+
+ private:
+ const C *mPrefix;
+ T mValue;
+
+ friend std::ostream &operator<<(std::ostream &os, const FmtHexHelper &fmt)
+ {
+ return FmtHexAutoSized(os, fmt.mValue, fmt.mPrefix, "0x", '0');
+ }
+
+ friend std::wostream &operator<<(std::wostream &wos, const FmtHexHelper &fmt)
+ {
+ return FmtHexAutoSized(wos, fmt.mValue, fmt.mPrefix, L"0x", L'0');
+ }
+};
+
+} // namespace priv
+
+template <typename T, typename C = char>
+priv::FmtHexHelper<T, C> FmtHex(T value)
+{
+ return priv::FmtHexHelper<T, C>(value);
+}
+
+#if defined(ANGLE_PLATFORM_WINDOWS)
+priv::FmtHexHelper<HRESULT, char> FmtHR(HRESULT value);
+priv::FmtHexHelper<DWORD, char> FmtErr(DWORD value);
+#endif // defined(ANGLE_PLATFORM_WINDOWS)
+
+template <typename T>
+std::ostream &FmtHex(std::ostream &os, T value)
+{
+ return priv::FmtHexAutoSized(os, value, "", "0x", '0');
+}
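+
+// Illustrative usage (not part of the original source): FmtHex zero-pads to the natural width
+// of the value's type, e.g.
+//   ERR() << "flags = " << gl::FmtHex<uint16_t>(0x1F);  // streams "flags = 0x001F"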
+
+// A few definitions of macros that don't generate much code. These are used
+// by ANGLE_LOG(). Since these are used all over our code, it's
+// better to have compact code for these operations.
+#define COMPACT_ANGLE_LOG_EX_EVENT(ClassName, ...) \
+ ::gl::ClassName(__FILE__, __FUNCTION__, __LINE__, ::gl::LOG_EVENT, ##__VA_ARGS__)
+#define COMPACT_ANGLE_LOG_EX_INFO(ClassName, ...) \
+ ::gl::ClassName(__FILE__, __FUNCTION__, __LINE__, ::gl::LOG_INFO, ##__VA_ARGS__)
+#define COMPACT_ANGLE_LOG_EX_WARN(ClassName, ...) \
+ ::gl::ClassName(__FILE__, __FUNCTION__, __LINE__, ::gl::LOG_WARN, ##__VA_ARGS__)
+#define COMPACT_ANGLE_LOG_EX_ERR(ClassName, ...) \
+ ::gl::ClassName(__FILE__, __FUNCTION__, __LINE__, ::gl::LOG_ERR, ##__VA_ARGS__)
+#define COMPACT_ANGLE_LOG_EX_FATAL(ClassName, ...) \
+ ::gl::ClassName(__FILE__, __FUNCTION__, __LINE__, ::gl::LOG_FATAL, ##__VA_ARGS__)
+
+#define COMPACT_ANGLE_LOG_EVENT COMPACT_ANGLE_LOG_EX_EVENT(LogMessage)
+#define COMPACT_ANGLE_LOG_INFO COMPACT_ANGLE_LOG_EX_INFO(LogMessage)
+#define COMPACT_ANGLE_LOG_WARN COMPACT_ANGLE_LOG_EX_WARN(LogMessage)
+#define COMPACT_ANGLE_LOG_ERR COMPACT_ANGLE_LOG_EX_ERR(LogMessage)
+#define COMPACT_ANGLE_LOG_FATAL COMPACT_ANGLE_LOG_EX_FATAL(LogMessage)
+
+#define ANGLE_LOG_IS_ON(severity) (::gl::priv::ShouldCreatePlatformLogMessage(::gl::LOG_##severity))
+
+// Helper macro which avoids evaluating the arguments to a stream if the condition doesn't hold.
+// Condition is evaluated once and only once.
+#define ANGLE_LAZY_STREAM(stream, condition) \
+ !(condition) ? static_cast<void>(0) : ::gl::priv::LogMessageVoidify() & (stream)
+
+// We use the preprocessor's merging operator, "##", so that, e.g.,
+// ANGLE_LOG(EVENT) becomes the token COMPACT_ANGLE_LOG_EVENT. There's some funny
+// subtle difference between ostream member streaming functions (e.g.,
+// ostream::operator<<(int) and ostream non-member streaming functions
+// (e.g., ::operator<<(ostream&, string&): it turns out that it's
+// impossible to stream something like a string directly to an unnamed
+// ostream. We employ a neat hack by calling the stream() member
+// function of LogMessage which seems to avoid the problem.
+#define ANGLE_LOG_STREAM(severity) COMPACT_ANGLE_LOG_##severity.stream()
+
+#define ANGLE_LOG(severity) ANGLE_LAZY_STREAM(ANGLE_LOG_STREAM(severity), ANGLE_LOG_IS_ON(severity))
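+
+// Illustrative usage (not part of the original source):
+//   ANGLE_LOG(WARN) << "Unexpected enum value: " << value;
+// The streamed text is buffered in a LogMessage whose destructor hands it to the installed
+// debug annotator (for WARN and above) or to Trace() otherwise.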
+
+} // namespace gl
+
+#if defined(ANGLE_ENABLE_DEBUG_TRACE) || defined(ANGLE_ENABLE_DEBUG_ANNOTATIONS)
+# define ANGLE_TRACE_ENABLED
+#endif
+
+#if !defined(NDEBUG) || defined(ANGLE_ASSERT_ALWAYS_ON)
+# define ANGLE_ENABLE_ASSERTS
+#endif
+
+#define INFO() ANGLE_LOG(INFO)
+#define WARN() ANGLE_LOG(WARN)
+#define ERR() ANGLE_LOG(ERR)
+#define FATAL() ANGLE_LOG(FATAL)
+
+// A macro to log a performance event around a scope.
+#if defined(ANGLE_TRACE_ENABLED)
+# if defined(_MSC_VER)
+# define EVENT(context, entryPoint, message, ...) \
+ gl::ScopedPerfEventHelper scopedPerfEventHelper##__LINE__( \
+ context, angle::EntryPoint::entryPoint); \
+ do \
+ { \
+ if (gl::ShouldBeginScopedEvent(context)) \
+ { \
+ scopedPerfEventHelper##__LINE__.begin( \
+ "%s(" message ")", GetEntryPointName(angle::EntryPoint::entryPoint), \
+ __VA_ARGS__); \
+ } \
+ } while (0)
+# else
+# define EVENT(context, entryPoint, message, ...) \
+ gl::ScopedPerfEventHelper scopedPerfEventHelper(context, \
+ angle::EntryPoint::entryPoint); \
+ do \
+ { \
+ if (gl::ShouldBeginScopedEvent(context)) \
+ { \
+ scopedPerfEventHelper.begin("%s(" message ")", \
+ GetEntryPointName(angle::EntryPoint::entryPoint), \
+ ##__VA_ARGS__); \
+ } \
+ } while (0)
+# endif // _MSC_VER
+#else
+# define EVENT(message, ...) (void(0))
+#endif
+
+// The state tracked by ANGLE will be validated with the driver state before each call
+#if defined(ANGLE_ENABLE_DEBUG_TRACE)
+# define ANGLE_STATE_VALIDATION_ENABLED
+#endif
+
+#if defined(__GNUC__)
+# define ANGLE_CRASH() __builtin_trap()
+#else
+# define ANGLE_CRASH() ((void)(*(volatile char *)0 = 0)), __assume(0)
+#endif
+
+#if !defined(NDEBUG)
+# define ANGLE_ASSERT_IMPL(expression) assert(expression)
+#else
+// TODO(jmadill): Detect if debugger is attached and break.
+# define ANGLE_ASSERT_IMPL(expression) ANGLE_CRASH()
+#endif // !defined(NDEBUG)
+
+// Note that gSwallowStream is used instead of an arbitrary LOG() stream to avoid the creation of an
+// object with a non-trivial destructor (LogMessage). On MSVC x86 (checked on 2015 Update 3), this
+// causes a few additional pointless instructions to be emitted even at full optimization level,
+// even though the : arm of the ternary operator is clearly never executed. Using a simpler object
+// to be &'d with Voidify() avoids these extra instructions. Using a simpler POD object with a
+// templated operator<< also works to avoid these instructions. However, this causes warnings on
+// statically defined implementations of operator<<(std::ostream, ...) in some .cpp files, because
+// they become defined-but-unreferenced functions. A reinterpret_cast of 0 to an ostream* also is
+// not suitable, because some compilers warn of undefined behavior.
+#define ANGLE_EAT_STREAM_PARAMETERS \
+ true ? static_cast<void>(0) : ::gl::priv::LogMessageVoidify() & (*::gl::priv::gSwallowStream)
+
+// A macro asserting a condition and outputting failures to the debug log
+#if defined(ANGLE_ENABLE_ASSERTS)
+# define ASSERT(expression) \
+ (expression ? static_cast<void>(0) \
+ : (FATAL() << "\t! Assert failed in " << __FUNCTION__ << " (" << __FILE__ \
+ << ":" << __LINE__ << "): " << #expression))
+#else
+# define ASSERT(condition) ANGLE_EAT_STREAM_PARAMETERS << !(condition)
+#endif // defined(ANGLE_ENABLE_ASSERTS)
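+
+// Example: ASSERT(ptr != nullptr) (with "ptr" an illustrative name) logs a FATAL()
+// message with function, file and line information when ANGLE_ENABLE_ASSERTS is
+// defined. Otherwise the condition is routed through ANGLE_EAT_STREAM_PARAMETERS, so
+// it must still compile but is never evaluated at run time.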
+
+#define ANGLE_UNUSED_VARIABLE(variable) (static_cast<void>(variable))
+
+// A macro to indicate unimplemented functionality
+#ifndef NOASSERT_UNIMPLEMENTED
+# define NOASSERT_UNIMPLEMENTED 1
+#endif
+
+#if defined(ANGLE_TRACE_ENABLED) || defined(ANGLE_ENABLE_ASSERTS)
+# define UNIMPLEMENTED() \
+ do \
+ { \
+ WARN() << "\t! Unimplemented: " << __FUNCTION__ << "(" << __FILE__ << ":" << __LINE__ \
+ << ")"; \
+ ASSERT(NOASSERT_UNIMPLEMENTED); \
+ } while (0)
+
+// A macro for code which is not expected to be reached under valid assumptions
+# define UNREACHABLE() \
+ do \
+ { \
+ FATAL() << "\t! Unreachable reached: " << __FUNCTION__ << "(" << __FILE__ << ":" \
+ << __LINE__ << ")"; \
+ } while (0)
+#else
+# define UNIMPLEMENTED() \
+ do \
+ { \
+ ASSERT(NOASSERT_UNIMPLEMENTED); \
+ } while (0)
+
+// A macro for code which is not expected to be reached under valid assumptions
+# define UNREACHABLE() \
+ do \
+ { \
+ ASSERT(false); \
+ } while (0)
+#endif // defined(ANGLE_TRACE_ENABLED) || defined(ANGLE_ENABLE_ASSERTS)
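+
+// Example: an exhaustive switch can flag impossible cases with UNREACHABLE() (the enum
+// and values below are illustrative):
+//
+//     switch (mode)
+//     {
+//         case DrawMode::Points:
+//             return 1;
+//         case DrawMode::Triangles:
+//             return 3;
+//         default:
+//             UNREACHABLE();
+//             return 0;
+//     }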
+
+#if defined(ANGLE_PLATFORM_WINDOWS)
+# define ANGLE_FUNCTION __FUNCTION__
+#else
+# define ANGLE_FUNCTION __func__
+#endif
+
+// Defining ANGLE_ENABLE_STRUCT_PADDING_WARNINGS will enable warnings when members are added to
+// structs to enforce packing. This is helpful for diagnosing unexpected struct sizes when making
+// fast cache variables.
+#if defined(__clang__)
+# define ANGLE_ENABLE_STRUCT_PADDING_WARNINGS \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic error \"-Wpadded\"")
+# define ANGLE_DISABLE_STRUCT_PADDING_WARNINGS _Pragma("clang diagnostic pop")
+#elif defined(__GNUC__)
+# define ANGLE_ENABLE_STRUCT_PADDING_WARNINGS \
+ _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic error \"-Wpadded\"")
+# define ANGLE_DISABLE_STRUCT_PADDING_WARNINGS _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+# define ANGLE_ENABLE_STRUCT_PADDING_WARNINGS \
+ __pragma(warning(push)) __pragma(warning(error : 4820))
+# define ANGLE_DISABLE_STRUCT_PADDING_WARNINGS __pragma(warning(pop))
+#else
+# define ANGLE_ENABLE_STRUCT_PADDING_WARNINGS
+# define ANGLE_DISABLE_STRUCT_PADDING_WARNINGS
+#endif
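+
+// Example: wrap a cache struct so that any compiler-inserted padding becomes a build
+// error (the struct and its fields are illustrative):
+//
+//     ANGLE_ENABLE_STRUCT_PADDING_WARNINGS
+//     struct PackedCacheKey
+//     {
+//         uint32_t format;
+//         uint32_t samples;
+//     };
+//     ANGLE_DISABLE_STRUCT_PADDING_WARNINGS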
+
+#if defined(__clang__)
+# define ANGLE_DISABLE_SUGGEST_OVERRIDE_WARNINGS \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wsuggest-destructor-override\"") \
+ _Pragma("clang diagnostic ignored \"-Wsuggest-override\"")
+# define ANGLE_REENABLE_SUGGEST_OVERRIDE_WARNINGS _Pragma("clang diagnostic pop")
+#else
+# define ANGLE_DISABLE_SUGGEST_OVERRIDE_WARNINGS
+# define ANGLE_REENABLE_SUGGEST_OVERRIDE_WARNINGS
+#endif
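+
+// Example: these disable/re-enable pairs typically bracket includes or declarations that
+// would otherwise trigger the warning (the header name is illustrative):
+//
+//     ANGLE_DISABLE_SUGGEST_OVERRIDE_WARNINGS
+//     #include "third_party_header_with_virtuals.h"
+//     ANGLE_REENABLE_SUGGEST_OVERRIDE_WARNINGS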
+
+#if defined(__clang__)
+# define ANGLE_DISABLE_EXTRA_SEMI_WARNING \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wextra-semi\"")
+# define ANGLE_REENABLE_EXTRA_SEMI_WARNING _Pragma("clang diagnostic pop")
+#else
+# define ANGLE_DISABLE_EXTRA_SEMI_WARNING
+# define ANGLE_REENABLE_EXTRA_SEMI_WARNING
+#endif
+
+#if defined(__clang__)
+# define ANGLE_DISABLE_EXTRA_SEMI_STMT_WARNING \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wextra-semi-stmt\"")
+# define ANGLE_REENABLE_EXTRA_SEMI_STMT_WARNING _Pragma("clang diagnostic pop")
+#else
+# define ANGLE_DISABLE_EXTRA_SEMI_STMT_WARNING
+# define ANGLE_REENABLE_EXTRA_SEMI_STMT_WARNING
+#endif
+
+#if defined(__clang__)
+# define ANGLE_DISABLE_SHADOWING_WARNING \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow-field\"")
+# define ANGLE_REENABLE_SHADOWING_WARNING _Pragma("clang diagnostic pop")
+#else
+# define ANGLE_DISABLE_SHADOWING_WARNING
+# define ANGLE_REENABLE_SHADOWING_WARNING
+#endif
+
+#if defined(__clang__)
+# define ANGLE_DISABLE_DESTRUCTOR_OVERRIDE_WARNING \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Winconsistent-missing-destructor-override\"")
+# define ANGLE_REENABLE_DESTRUCTOR_OVERRIDE_WARNING _Pragma("clang diagnostic pop")
+#else
+# define ANGLE_DISABLE_DESTRUCTOR_OVERRIDE_WARNING
+# define ANGLE_REENABLE_DESTRUCTOR_OVERRIDE_WARNING
+#endif
+
+#if defined(__clang__)
+# define ANGLE_DISABLE_UNUSED_FUNCTION_WARNING \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wunused-function\"")
+# define ANGLE_REENABLE_UNUSED_FUNCTION_WARNING _Pragma("clang diagnostic pop")
+#else
+# define ANGLE_DISABLE_UNUSED_FUNCTION_WARNING
+# define ANGLE_REENABLE_UNUSED_FUNCTION_WARNING
+#endif
+
+#endif // COMMON_DEBUG_H_
diff --git a/gfx/angle/checkout/src/common/entry_points_enum_autogen.cpp b/gfx/angle/checkout/src/common/entry_points_enum_autogen.cpp
new file mode 100644
index 0000000000..993eecc8da
--- /dev/null
+++ b/gfx/angle/checkout/src/common/entry_points_enum_autogen.cpp
@@ -0,0 +1,3454 @@
+// GENERATED FILE - DO NOT EDIT.
+// Generated by generate_entry_points.py using data from gl.xml and gl_angle_ext.xml.
+//
+// Copyright 2020 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// entry_points_enum_autogen.cpp:
+// Helper methods for the GL/GLES entry points enumeration.
+
+#include "common/entry_points_enum_autogen.h"
+
+#include "common/debug.h"
+
+namespace angle
+{
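+// Example (illustrative): GetEntryPointName(EntryPoint::GLDrawArrays) returns
+// "glDrawArrays"; the EVENT() macro in common/debug.h uses this helper to label scoped
+// performance events.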
+const char *GetEntryPointName(EntryPoint ep)
+{
+ switch (ep)
+ {
+ case EntryPoint::CLBuildProgram:
+ return "clBuildProgram";
+ case EntryPoint::CLCloneKernel:
+ return "clCloneKernel";
+ case EntryPoint::CLCompileProgram:
+ return "clCompileProgram";
+ case EntryPoint::CLCreateBuffer:
+ return "clCreateBuffer";
+ case EntryPoint::CLCreateBufferWithProperties:
+ return "clCreateBufferWithProperties";
+ case EntryPoint::CLCreateCommandQueue:
+ return "clCreateCommandQueue";
+ case EntryPoint::CLCreateCommandQueueWithProperties:
+ return "clCreateCommandQueueWithProperties";
+ case EntryPoint::CLCreateContext:
+ return "clCreateContext";
+ case EntryPoint::CLCreateContextFromType:
+ return "clCreateContextFromType";
+ case EntryPoint::CLCreateImage:
+ return "clCreateImage";
+ case EntryPoint::CLCreateImage2D:
+ return "clCreateImage2D";
+ case EntryPoint::CLCreateImage3D:
+ return "clCreateImage3D";
+ case EntryPoint::CLCreateImageWithProperties:
+ return "clCreateImageWithProperties";
+ case EntryPoint::CLCreateKernel:
+ return "clCreateKernel";
+ case EntryPoint::CLCreateKernelsInProgram:
+ return "clCreateKernelsInProgram";
+ case EntryPoint::CLCreatePipe:
+ return "clCreatePipe";
+ case EntryPoint::CLCreateProgramWithBinary:
+ return "clCreateProgramWithBinary";
+ case EntryPoint::CLCreateProgramWithBuiltInKernels:
+ return "clCreateProgramWithBuiltInKernels";
+ case EntryPoint::CLCreateProgramWithIL:
+ return "clCreateProgramWithIL";
+ case EntryPoint::CLCreateProgramWithSource:
+ return "clCreateProgramWithSource";
+ case EntryPoint::CLCreateSampler:
+ return "clCreateSampler";
+ case EntryPoint::CLCreateSamplerWithProperties:
+ return "clCreateSamplerWithProperties";
+ case EntryPoint::CLCreateSubBuffer:
+ return "clCreateSubBuffer";
+ case EntryPoint::CLCreateSubDevices:
+ return "clCreateSubDevices";
+ case EntryPoint::CLCreateUserEvent:
+ return "clCreateUserEvent";
+ case EntryPoint::CLEnqueueBarrier:
+ return "clEnqueueBarrier";
+ case EntryPoint::CLEnqueueBarrierWithWaitList:
+ return "clEnqueueBarrierWithWaitList";
+ case EntryPoint::CLEnqueueCopyBuffer:
+ return "clEnqueueCopyBuffer";
+ case EntryPoint::CLEnqueueCopyBufferRect:
+ return "clEnqueueCopyBufferRect";
+ case EntryPoint::CLEnqueueCopyBufferToImage:
+ return "clEnqueueCopyBufferToImage";
+ case EntryPoint::CLEnqueueCopyImage:
+ return "clEnqueueCopyImage";
+ case EntryPoint::CLEnqueueCopyImageToBuffer:
+ return "clEnqueueCopyImageToBuffer";
+ case EntryPoint::CLEnqueueFillBuffer:
+ return "clEnqueueFillBuffer";
+ case EntryPoint::CLEnqueueFillImage:
+ return "clEnqueueFillImage";
+ case EntryPoint::CLEnqueueMapBuffer:
+ return "clEnqueueMapBuffer";
+ case EntryPoint::CLEnqueueMapImage:
+ return "clEnqueueMapImage";
+ case EntryPoint::CLEnqueueMarker:
+ return "clEnqueueMarker";
+ case EntryPoint::CLEnqueueMarkerWithWaitList:
+ return "clEnqueueMarkerWithWaitList";
+ case EntryPoint::CLEnqueueMigrateMemObjects:
+ return "clEnqueueMigrateMemObjects";
+ case EntryPoint::CLEnqueueNDRangeKernel:
+ return "clEnqueueNDRangeKernel";
+ case EntryPoint::CLEnqueueNativeKernel:
+ return "clEnqueueNativeKernel";
+ case EntryPoint::CLEnqueueReadBuffer:
+ return "clEnqueueReadBuffer";
+ case EntryPoint::CLEnqueueReadBufferRect:
+ return "clEnqueueReadBufferRect";
+ case EntryPoint::CLEnqueueReadImage:
+ return "clEnqueueReadImage";
+ case EntryPoint::CLEnqueueSVMFree:
+ return "clEnqueueSVMFree";
+ case EntryPoint::CLEnqueueSVMMap:
+ return "clEnqueueSVMMap";
+ case EntryPoint::CLEnqueueSVMMemFill:
+ return "clEnqueueSVMMemFill";
+ case EntryPoint::CLEnqueueSVMMemcpy:
+ return "clEnqueueSVMMemcpy";
+ case EntryPoint::CLEnqueueSVMMigrateMem:
+ return "clEnqueueSVMMigrateMem";
+ case EntryPoint::CLEnqueueSVMUnmap:
+ return "clEnqueueSVMUnmap";
+ case EntryPoint::CLEnqueueTask:
+ return "clEnqueueTask";
+ case EntryPoint::CLEnqueueUnmapMemObject:
+ return "clEnqueueUnmapMemObject";
+ case EntryPoint::CLEnqueueWaitForEvents:
+ return "clEnqueueWaitForEvents";
+ case EntryPoint::CLEnqueueWriteBuffer:
+ return "clEnqueueWriteBuffer";
+ case EntryPoint::CLEnqueueWriteBufferRect:
+ return "clEnqueueWriteBufferRect";
+ case EntryPoint::CLEnqueueWriteImage:
+ return "clEnqueueWriteImage";
+ case EntryPoint::CLFinish:
+ return "clFinish";
+ case EntryPoint::CLFlush:
+ return "clFlush";
+ case EntryPoint::CLGetCommandQueueInfo:
+ return "clGetCommandQueueInfo";
+ case EntryPoint::CLGetContextInfo:
+ return "clGetContextInfo";
+ case EntryPoint::CLGetDeviceAndHostTimer:
+ return "clGetDeviceAndHostTimer";
+ case EntryPoint::CLGetDeviceIDs:
+ return "clGetDeviceIDs";
+ case EntryPoint::CLGetDeviceInfo:
+ return "clGetDeviceInfo";
+ case EntryPoint::CLGetEventInfo:
+ return "clGetEventInfo";
+ case EntryPoint::CLGetEventProfilingInfo:
+ return "clGetEventProfilingInfo";
+ case EntryPoint::CLGetExtensionFunctionAddress:
+ return "clGetExtensionFunctionAddress";
+ case EntryPoint::CLGetExtensionFunctionAddressForPlatform:
+ return "clGetExtensionFunctionAddressForPlatform";
+ case EntryPoint::CLGetHostTimer:
+ return "clGetHostTimer";
+ case EntryPoint::CLGetImageInfo:
+ return "clGetImageInfo";
+ case EntryPoint::CLGetKernelArgInfo:
+ return "clGetKernelArgInfo";
+ case EntryPoint::CLGetKernelInfo:
+ return "clGetKernelInfo";
+ case EntryPoint::CLGetKernelSubGroupInfo:
+ return "clGetKernelSubGroupInfo";
+ case EntryPoint::CLGetKernelWorkGroupInfo:
+ return "clGetKernelWorkGroupInfo";
+ case EntryPoint::CLGetMemObjectInfo:
+ return "clGetMemObjectInfo";
+ case EntryPoint::CLGetPipeInfo:
+ return "clGetPipeInfo";
+ case EntryPoint::CLGetPlatformIDs:
+ return "clGetPlatformIDs";
+ case EntryPoint::CLGetPlatformInfo:
+ return "clGetPlatformInfo";
+ case EntryPoint::CLGetProgramBuildInfo:
+ return "clGetProgramBuildInfo";
+ case EntryPoint::CLGetProgramInfo:
+ return "clGetProgramInfo";
+ case EntryPoint::CLGetSamplerInfo:
+ return "clGetSamplerInfo";
+ case EntryPoint::CLGetSupportedImageFormats:
+ return "clGetSupportedImageFormats";
+ case EntryPoint::CLIcdGetPlatformIDsKHR:
+ return "clIcdGetPlatformIDsKHR";
+ case EntryPoint::CLLinkProgram:
+ return "clLinkProgram";
+ case EntryPoint::CLReleaseCommandQueue:
+ return "clReleaseCommandQueue";
+ case EntryPoint::CLReleaseContext:
+ return "clReleaseContext";
+ case EntryPoint::CLReleaseDevice:
+ return "clReleaseDevice";
+ case EntryPoint::CLReleaseEvent:
+ return "clReleaseEvent";
+ case EntryPoint::CLReleaseKernel:
+ return "clReleaseKernel";
+ case EntryPoint::CLReleaseMemObject:
+ return "clReleaseMemObject";
+ case EntryPoint::CLReleaseProgram:
+ return "clReleaseProgram";
+ case EntryPoint::CLReleaseSampler:
+ return "clReleaseSampler";
+ case EntryPoint::CLRetainCommandQueue:
+ return "clRetainCommandQueue";
+ case EntryPoint::CLRetainContext:
+ return "clRetainContext";
+ case EntryPoint::CLRetainDevice:
+ return "clRetainDevice";
+ case EntryPoint::CLRetainEvent:
+ return "clRetainEvent";
+ case EntryPoint::CLRetainKernel:
+ return "clRetainKernel";
+ case EntryPoint::CLRetainMemObject:
+ return "clRetainMemObject";
+ case EntryPoint::CLRetainProgram:
+ return "clRetainProgram";
+ case EntryPoint::CLRetainSampler:
+ return "clRetainSampler";
+ case EntryPoint::CLSVMAlloc:
+ return "clSVMAlloc";
+ case EntryPoint::CLSVMFree:
+ return "clSVMFree";
+ case EntryPoint::CLSetCommandQueueProperty:
+ return "clSetCommandQueueProperty";
+ case EntryPoint::CLSetContextDestructorCallback:
+ return "clSetContextDestructorCallback";
+ case EntryPoint::CLSetDefaultDeviceCommandQueue:
+ return "clSetDefaultDeviceCommandQueue";
+ case EntryPoint::CLSetEventCallback:
+ return "clSetEventCallback";
+ case EntryPoint::CLSetKernelArg:
+ return "clSetKernelArg";
+ case EntryPoint::CLSetKernelArgSVMPointer:
+ return "clSetKernelArgSVMPointer";
+ case EntryPoint::CLSetKernelExecInfo:
+ return "clSetKernelExecInfo";
+ case EntryPoint::CLSetMemObjectDestructorCallback:
+ return "clSetMemObjectDestructorCallback";
+ case EntryPoint::CLSetProgramReleaseCallback:
+ return "clSetProgramReleaseCallback";
+ case EntryPoint::CLSetProgramSpecializationConstant:
+ return "clSetProgramSpecializationConstant";
+ case EntryPoint::CLSetUserEventStatus:
+ return "clSetUserEventStatus";
+ case EntryPoint::CLUnloadCompiler:
+ return "clUnloadCompiler";
+ case EntryPoint::CLUnloadPlatformCompiler:
+ return "clUnloadPlatformCompiler";
+ case EntryPoint::CLWaitForEvents:
+ return "clWaitForEvents";
+ case EntryPoint::EGLBindAPI:
+ return "eglBindAPI";
+ case EntryPoint::EGLBindTexImage:
+ return "eglBindTexImage";
+ case EntryPoint::EGLChooseConfig:
+ return "eglChooseConfig";
+ case EntryPoint::EGLClientWaitSync:
+ return "eglClientWaitSync";
+ case EntryPoint::EGLClientWaitSyncKHR:
+ return "eglClientWaitSyncKHR";
+ case EntryPoint::EGLCopyBuffers:
+ return "eglCopyBuffers";
+ case EntryPoint::EGLCopyMetalSharedEventANGLE:
+ return "eglCopyMetalSharedEventANGLE";
+ case EntryPoint::EGLCreateContext:
+ return "eglCreateContext";
+ case EntryPoint::EGLCreateDeviceANGLE:
+ return "eglCreateDeviceANGLE";
+ case EntryPoint::EGLCreateImage:
+ return "eglCreateImage";
+ case EntryPoint::EGLCreateImageKHR:
+ return "eglCreateImageKHR";
+ case EntryPoint::EGLCreateNativeClientBufferANDROID:
+ return "eglCreateNativeClientBufferANDROID";
+ case EntryPoint::EGLCreatePbufferFromClientBuffer:
+ return "eglCreatePbufferFromClientBuffer";
+ case EntryPoint::EGLCreatePbufferSurface:
+ return "eglCreatePbufferSurface";
+ case EntryPoint::EGLCreatePixmapSurface:
+ return "eglCreatePixmapSurface";
+ case EntryPoint::EGLCreatePlatformPixmapSurface:
+ return "eglCreatePlatformPixmapSurface";
+ case EntryPoint::EGLCreatePlatformPixmapSurfaceEXT:
+ return "eglCreatePlatformPixmapSurfaceEXT";
+ case EntryPoint::EGLCreatePlatformWindowSurface:
+ return "eglCreatePlatformWindowSurface";
+ case EntryPoint::EGLCreatePlatformWindowSurfaceEXT:
+ return "eglCreatePlatformWindowSurfaceEXT";
+ case EntryPoint::EGLCreateStreamKHR:
+ return "eglCreateStreamKHR";
+ case EntryPoint::EGLCreateStreamProducerD3DTextureANGLE:
+ return "eglCreateStreamProducerD3DTextureANGLE";
+ case EntryPoint::EGLCreateSync:
+ return "eglCreateSync";
+ case EntryPoint::EGLCreateSyncKHR:
+ return "eglCreateSyncKHR";
+ case EntryPoint::EGLCreateWindowSurface:
+ return "eglCreateWindowSurface";
+ case EntryPoint::EGLDebugMessageControlKHR:
+ return "eglDebugMessageControlKHR";
+ case EntryPoint::EGLDestroyContext:
+ return "eglDestroyContext";
+ case EntryPoint::EGLDestroyImage:
+ return "eglDestroyImage";
+ case EntryPoint::EGLDestroyImageKHR:
+ return "eglDestroyImageKHR";
+ case EntryPoint::EGLDestroyStreamKHR:
+ return "eglDestroyStreamKHR";
+ case EntryPoint::EGLDestroySurface:
+ return "eglDestroySurface";
+ case EntryPoint::EGLDestroySync:
+ return "eglDestroySync";
+ case EntryPoint::EGLDestroySyncKHR:
+ return "eglDestroySyncKHR";
+ case EntryPoint::EGLDupNativeFenceFDANDROID:
+ return "eglDupNativeFenceFDANDROID";
+ case EntryPoint::EGLExportVkImageANGLE:
+ return "eglExportVkImageANGLE";
+ case EntryPoint::EGLForceGPUSwitchANGLE:
+ return "eglForceGPUSwitchANGLE";
+ case EntryPoint::EGLGetCompositorTimingANDROID:
+ return "eglGetCompositorTimingANDROID";
+ case EntryPoint::EGLGetCompositorTimingSupportedANDROID:
+ return "eglGetCompositorTimingSupportedANDROID";
+ case EntryPoint::EGLGetConfigAttrib:
+ return "eglGetConfigAttrib";
+ case EntryPoint::EGLGetConfigs:
+ return "eglGetConfigs";
+ case EntryPoint::EGLGetCurrentContext:
+ return "eglGetCurrentContext";
+ case EntryPoint::EGLGetCurrentDisplay:
+ return "eglGetCurrentDisplay";
+ case EntryPoint::EGLGetCurrentSurface:
+ return "eglGetCurrentSurface";
+ case EntryPoint::EGLGetDisplay:
+ return "eglGetDisplay";
+ case EntryPoint::EGLGetError:
+ return "eglGetError";
+ case EntryPoint::EGLGetFrameTimestampSupportedANDROID:
+ return "eglGetFrameTimestampSupportedANDROID";
+ case EntryPoint::EGLGetFrameTimestampsANDROID:
+ return "eglGetFrameTimestampsANDROID";
+ case EntryPoint::EGLGetMscRateANGLE:
+ return "eglGetMscRateANGLE";
+ case EntryPoint::EGLGetNativeClientBufferANDROID:
+ return "eglGetNativeClientBufferANDROID";
+ case EntryPoint::EGLGetNextFrameIdANDROID:
+ return "eglGetNextFrameIdANDROID";
+ case EntryPoint::EGLGetPlatformDisplay:
+ return "eglGetPlatformDisplay";
+ case EntryPoint::EGLGetPlatformDisplayEXT:
+ return "eglGetPlatformDisplayEXT";
+ case EntryPoint::EGLGetProcAddress:
+ return "eglGetProcAddress";
+ case EntryPoint::EGLGetSyncAttrib:
+ return "eglGetSyncAttrib";
+ case EntryPoint::EGLGetSyncAttribKHR:
+ return "eglGetSyncAttribKHR";
+ case EntryPoint::EGLGetSyncValuesCHROMIUM:
+ return "eglGetSyncValuesCHROMIUM";
+ case EntryPoint::EGLHandleGPUSwitchANGLE:
+ return "eglHandleGPUSwitchANGLE";
+ case EntryPoint::EGLInitialize:
+ return "eglInitialize";
+ case EntryPoint::EGLLabelObjectKHR:
+ return "eglLabelObjectKHR";
+ case EntryPoint::EGLLockSurfaceKHR:
+ return "eglLockSurfaceKHR";
+ case EntryPoint::EGLMakeCurrent:
+ return "eglMakeCurrent";
+ case EntryPoint::EGLPostSubBufferNV:
+ return "eglPostSubBufferNV";
+ case EntryPoint::EGLPrepareSwapBuffersANGLE:
+ return "eglPrepareSwapBuffersANGLE";
+ case EntryPoint::EGLPresentationTimeANDROID:
+ return "eglPresentationTimeANDROID";
+ case EntryPoint::EGLProgramCacheGetAttribANGLE:
+ return "eglProgramCacheGetAttribANGLE";
+ case EntryPoint::EGLProgramCachePopulateANGLE:
+ return "eglProgramCachePopulateANGLE";
+ case EntryPoint::EGLProgramCacheQueryANGLE:
+ return "eglProgramCacheQueryANGLE";
+ case EntryPoint::EGLProgramCacheResizeANGLE:
+ return "eglProgramCacheResizeANGLE";
+ case EntryPoint::EGLQueryAPI:
+ return "eglQueryAPI";
+ case EntryPoint::EGLQueryContext:
+ return "eglQueryContext";
+ case EntryPoint::EGLQueryDebugKHR:
+ return "eglQueryDebugKHR";
+ case EntryPoint::EGLQueryDeviceAttribEXT:
+ return "eglQueryDeviceAttribEXT";
+ case EntryPoint::EGLQueryDeviceStringEXT:
+ return "eglQueryDeviceStringEXT";
+ case EntryPoint::EGLQueryDisplayAttribANGLE:
+ return "eglQueryDisplayAttribANGLE";
+ case EntryPoint::EGLQueryDisplayAttribEXT:
+ return "eglQueryDisplayAttribEXT";
+ case EntryPoint::EGLQueryDmaBufFormatsEXT:
+ return "eglQueryDmaBufFormatsEXT";
+ case EntryPoint::EGLQueryDmaBufModifiersEXT:
+ return "eglQueryDmaBufModifiersEXT";
+ case EntryPoint::EGLQueryStreamKHR:
+ return "eglQueryStreamKHR";
+ case EntryPoint::EGLQueryStreamu64KHR:
+ return "eglQueryStreamu64KHR";
+ case EntryPoint::EGLQueryString:
+ return "eglQueryString";
+ case EntryPoint::EGLQueryStringiANGLE:
+ return "eglQueryStringiANGLE";
+ case EntryPoint::EGLQuerySurface:
+ return "eglQuerySurface";
+ case EntryPoint::EGLQuerySurface64KHR:
+ return "eglQuerySurface64KHR";
+ case EntryPoint::EGLQuerySurfacePointerANGLE:
+ return "eglQuerySurfacePointerANGLE";
+ case EntryPoint::EGLReacquireHighPowerGPUANGLE:
+ return "eglReacquireHighPowerGPUANGLE";
+ case EntryPoint::EGLReleaseDeviceANGLE:
+ return "eglReleaseDeviceANGLE";
+ case EntryPoint::EGLReleaseHighPowerGPUANGLE:
+ return "eglReleaseHighPowerGPUANGLE";
+ case EntryPoint::EGLReleaseTexImage:
+ return "eglReleaseTexImage";
+ case EntryPoint::EGLReleaseThread:
+ return "eglReleaseThread";
+ case EntryPoint::EGLSetBlobCacheFuncsANDROID:
+ return "eglSetBlobCacheFuncsANDROID";
+ case EntryPoint::EGLSetDamageRegionKHR:
+ return "eglSetDamageRegionKHR";
+ case EntryPoint::EGLSignalSyncKHR:
+ return "eglSignalSyncKHR";
+ case EntryPoint::EGLStreamAttribKHR:
+ return "eglStreamAttribKHR";
+ case EntryPoint::EGLStreamConsumerAcquireKHR:
+ return "eglStreamConsumerAcquireKHR";
+ case EntryPoint::EGLStreamConsumerGLTextureExternalAttribsNV:
+ return "eglStreamConsumerGLTextureExternalAttribsNV";
+ case EntryPoint::EGLStreamConsumerGLTextureExternalKHR:
+ return "eglStreamConsumerGLTextureExternalKHR";
+ case EntryPoint::EGLStreamConsumerReleaseKHR:
+ return "eglStreamConsumerReleaseKHR";
+ case EntryPoint::EGLStreamPostD3DTextureANGLE:
+ return "eglStreamPostD3DTextureANGLE";
+ case EntryPoint::EGLSurfaceAttrib:
+ return "eglSurfaceAttrib";
+ case EntryPoint::EGLSwapBuffers:
+ return "eglSwapBuffers";
+ case EntryPoint::EGLSwapBuffersWithDamageKHR:
+ return "eglSwapBuffersWithDamageKHR";
+ case EntryPoint::EGLSwapBuffersWithFrameTokenANGLE:
+ return "eglSwapBuffersWithFrameTokenANGLE";
+ case EntryPoint::EGLSwapInterval:
+ return "eglSwapInterval";
+ case EntryPoint::EGLTerminate:
+ return "eglTerminate";
+ case EntryPoint::EGLUnlockSurfaceKHR:
+ return "eglUnlockSurfaceKHR";
+ case EntryPoint::EGLWaitClient:
+ return "eglWaitClient";
+ case EntryPoint::EGLWaitGL:
+ return "eglWaitGL";
+ case EntryPoint::EGLWaitNative:
+ return "eglWaitNative";
+ case EntryPoint::EGLWaitSync:
+ return "eglWaitSync";
+ case EntryPoint::EGLWaitSyncKHR:
+ return "eglWaitSyncKHR";
+ case EntryPoint::GLAccum:
+ return "glAccum";
+ case EntryPoint::GLAcquireTexturesANGLE:
+ return "glAcquireTexturesANGLE";
+ case EntryPoint::GLActiveShaderProgram:
+ return "glActiveShaderProgram";
+ case EntryPoint::GLActiveShaderProgramEXT:
+ return "glActiveShaderProgramEXT";
+ case EntryPoint::GLActiveTexture:
+ return "glActiveTexture";
+ case EntryPoint::GLAlphaFunc:
+ return "glAlphaFunc";
+ case EntryPoint::GLAlphaFuncx:
+ return "glAlphaFuncx";
+ case EntryPoint::GLAreTexturesResident:
+ return "glAreTexturesResident";
+ case EntryPoint::GLArrayElement:
+ return "glArrayElement";
+ case EntryPoint::GLAttachShader:
+ return "glAttachShader";
+ case EntryPoint::GLBegin:
+ return "glBegin";
+ case EntryPoint::GLBeginConditionalRender:
+ return "glBeginConditionalRender";
+ case EntryPoint::GLBeginPerfMonitorAMD:
+ return "glBeginPerfMonitorAMD";
+ case EntryPoint::GLBeginPixelLocalStorageANGLE:
+ return "glBeginPixelLocalStorageANGLE";
+ case EntryPoint::GLBeginQuery:
+ return "glBeginQuery";
+ case EntryPoint::GLBeginQueryEXT:
+ return "glBeginQueryEXT";
+ case EntryPoint::GLBeginQueryIndexed:
+ return "glBeginQueryIndexed";
+ case EntryPoint::GLBeginTransformFeedback:
+ return "glBeginTransformFeedback";
+ case EntryPoint::GLBindAttribLocation:
+ return "glBindAttribLocation";
+ case EntryPoint::GLBindBuffer:
+ return "glBindBuffer";
+ case EntryPoint::GLBindBufferBase:
+ return "glBindBufferBase";
+ case EntryPoint::GLBindBufferRange:
+ return "glBindBufferRange";
+ case EntryPoint::GLBindBuffersBase:
+ return "glBindBuffersBase";
+ case EntryPoint::GLBindBuffersRange:
+ return "glBindBuffersRange";
+ case EntryPoint::GLBindFragDataLocation:
+ return "glBindFragDataLocation";
+ case EntryPoint::GLBindFragDataLocationEXT:
+ return "glBindFragDataLocationEXT";
+ case EntryPoint::GLBindFragDataLocationIndexed:
+ return "glBindFragDataLocationIndexed";
+ case EntryPoint::GLBindFragDataLocationIndexedEXT:
+ return "glBindFragDataLocationIndexedEXT";
+ case EntryPoint::GLBindFramebuffer:
+ return "glBindFramebuffer";
+ case EntryPoint::GLBindFramebufferOES:
+ return "glBindFramebufferOES";
+ case EntryPoint::GLBindImageTexture:
+ return "glBindImageTexture";
+ case EntryPoint::GLBindImageTextures:
+ return "glBindImageTextures";
+ case EntryPoint::GLBindProgramPipeline:
+ return "glBindProgramPipeline";
+ case EntryPoint::GLBindProgramPipelineEXT:
+ return "glBindProgramPipelineEXT";
+ case EntryPoint::GLBindRenderbuffer:
+ return "glBindRenderbuffer";
+ case EntryPoint::GLBindRenderbufferOES:
+ return "glBindRenderbufferOES";
+ case EntryPoint::GLBindSampler:
+ return "glBindSampler";
+ case EntryPoint::GLBindSamplers:
+ return "glBindSamplers";
+ case EntryPoint::GLBindTexture:
+ return "glBindTexture";
+ case EntryPoint::GLBindTextureUnit:
+ return "glBindTextureUnit";
+ case EntryPoint::GLBindTextures:
+ return "glBindTextures";
+ case EntryPoint::GLBindTransformFeedback:
+ return "glBindTransformFeedback";
+ case EntryPoint::GLBindUniformLocationCHROMIUM:
+ return "glBindUniformLocationCHROMIUM";
+ case EntryPoint::GLBindVertexArray:
+ return "glBindVertexArray";
+ case EntryPoint::GLBindVertexArrayOES:
+ return "glBindVertexArrayOES";
+ case EntryPoint::GLBindVertexBuffer:
+ return "glBindVertexBuffer";
+ case EntryPoint::GLBindVertexBuffers:
+ return "glBindVertexBuffers";
+ case EntryPoint::GLBitmap:
+ return "glBitmap";
+ case EntryPoint::GLBlendBarrier:
+ return "glBlendBarrier";
+ case EntryPoint::GLBlendBarrierKHR:
+ return "glBlendBarrierKHR";
+ case EntryPoint::GLBlendColor:
+ return "glBlendColor";
+ case EntryPoint::GLBlendEquation:
+ return "glBlendEquation";
+ case EntryPoint::GLBlendEquationSeparate:
+ return "glBlendEquationSeparate";
+ case EntryPoint::GLBlendEquationSeparatei:
+ return "glBlendEquationSeparatei";
+ case EntryPoint::GLBlendEquationSeparateiEXT:
+ return "glBlendEquationSeparateiEXT";
+ case EntryPoint::GLBlendEquationSeparateiOES:
+ return "glBlendEquationSeparateiOES";
+ case EntryPoint::GLBlendEquationi:
+ return "glBlendEquationi";
+ case EntryPoint::GLBlendEquationiEXT:
+ return "glBlendEquationiEXT";
+ case EntryPoint::GLBlendEquationiOES:
+ return "glBlendEquationiOES";
+ case EntryPoint::GLBlendFunc:
+ return "glBlendFunc";
+ case EntryPoint::GLBlendFuncSeparate:
+ return "glBlendFuncSeparate";
+ case EntryPoint::GLBlendFuncSeparatei:
+ return "glBlendFuncSeparatei";
+ case EntryPoint::GLBlendFuncSeparateiEXT:
+ return "glBlendFuncSeparateiEXT";
+ case EntryPoint::GLBlendFuncSeparateiOES:
+ return "glBlendFuncSeparateiOES";
+ case EntryPoint::GLBlendFunci:
+ return "glBlendFunci";
+ case EntryPoint::GLBlendFunciEXT:
+ return "glBlendFunciEXT";
+ case EntryPoint::GLBlendFunciOES:
+ return "glBlendFunciOES";
+ case EntryPoint::GLBlitFramebuffer:
+ return "glBlitFramebuffer";
+ case EntryPoint::GLBlitFramebufferANGLE:
+ return "glBlitFramebufferANGLE";
+ case EntryPoint::GLBlitFramebufferNV:
+ return "glBlitFramebufferNV";
+ case EntryPoint::GLBlitNamedFramebuffer:
+ return "glBlitNamedFramebuffer";
+ case EntryPoint::GLBufferData:
+ return "glBufferData";
+ case EntryPoint::GLBufferStorage:
+ return "glBufferStorage";
+ case EntryPoint::GLBufferStorageEXT:
+ return "glBufferStorageEXT";
+ case EntryPoint::GLBufferStorageExternalEXT:
+ return "glBufferStorageExternalEXT";
+ case EntryPoint::GLBufferStorageMemEXT:
+ return "glBufferStorageMemEXT";
+ case EntryPoint::GLBufferSubData:
+ return "glBufferSubData";
+ case EntryPoint::GLCallList:
+ return "glCallList";
+ case EntryPoint::GLCallLists:
+ return "glCallLists";
+ case EntryPoint::GLCheckFramebufferStatus:
+ return "glCheckFramebufferStatus";
+ case EntryPoint::GLCheckFramebufferStatusOES:
+ return "glCheckFramebufferStatusOES";
+ case EntryPoint::GLCheckNamedFramebufferStatus:
+ return "glCheckNamedFramebufferStatus";
+ case EntryPoint::GLClampColor:
+ return "glClampColor";
+ case EntryPoint::GLClear:
+ return "glClear";
+ case EntryPoint::GLClearAccum:
+ return "glClearAccum";
+ case EntryPoint::GLClearBufferData:
+ return "glClearBufferData";
+ case EntryPoint::GLClearBufferSubData:
+ return "glClearBufferSubData";
+ case EntryPoint::GLClearBufferfi:
+ return "glClearBufferfi";
+ case EntryPoint::GLClearBufferfv:
+ return "glClearBufferfv";
+ case EntryPoint::GLClearBufferiv:
+ return "glClearBufferiv";
+ case EntryPoint::GLClearBufferuiv:
+ return "glClearBufferuiv";
+ case EntryPoint::GLClearColor:
+ return "glClearColor";
+ case EntryPoint::GLClearColorx:
+ return "glClearColorx";
+ case EntryPoint::GLClearDepth:
+ return "glClearDepth";
+ case EntryPoint::GLClearDepthf:
+ return "glClearDepthf";
+ case EntryPoint::GLClearDepthx:
+ return "glClearDepthx";
+ case EntryPoint::GLClearIndex:
+ return "glClearIndex";
+ case EntryPoint::GLClearNamedBufferData:
+ return "glClearNamedBufferData";
+ case EntryPoint::GLClearNamedBufferSubData:
+ return "glClearNamedBufferSubData";
+ case EntryPoint::GLClearNamedFramebufferfi:
+ return "glClearNamedFramebufferfi";
+ case EntryPoint::GLClearNamedFramebufferfv:
+ return "glClearNamedFramebufferfv";
+ case EntryPoint::GLClearNamedFramebufferiv:
+ return "glClearNamedFramebufferiv";
+ case EntryPoint::GLClearNamedFramebufferuiv:
+ return "glClearNamedFramebufferuiv";
+ case EntryPoint::GLClearStencil:
+ return "glClearStencil";
+ case EntryPoint::GLClearTexImage:
+ return "glClearTexImage";
+ case EntryPoint::GLClearTexSubImage:
+ return "glClearTexSubImage";
+ case EntryPoint::GLClientActiveTexture:
+ return "glClientActiveTexture";
+ case EntryPoint::GLClientWaitSync:
+ return "glClientWaitSync";
+ case EntryPoint::GLClipControl:
+ return "glClipControl";
+ case EntryPoint::GLClipControlEXT:
+ return "glClipControlEXT";
+ case EntryPoint::GLClipPlane:
+ return "glClipPlane";
+ case EntryPoint::GLClipPlanef:
+ return "glClipPlanef";
+ case EntryPoint::GLClipPlanex:
+ return "glClipPlanex";
+ case EntryPoint::GLColor3b:
+ return "glColor3b";
+ case EntryPoint::GLColor3bv:
+ return "glColor3bv";
+ case EntryPoint::GLColor3d:
+ return "glColor3d";
+ case EntryPoint::GLColor3dv:
+ return "glColor3dv";
+ case EntryPoint::GLColor3f:
+ return "glColor3f";
+ case EntryPoint::GLColor3fv:
+ return "glColor3fv";
+ case EntryPoint::GLColor3i:
+ return "glColor3i";
+ case EntryPoint::GLColor3iv:
+ return "glColor3iv";
+ case EntryPoint::GLColor3s:
+ return "glColor3s";
+ case EntryPoint::GLColor3sv:
+ return "glColor3sv";
+ case EntryPoint::GLColor3ub:
+ return "glColor3ub";
+ case EntryPoint::GLColor3ubv:
+ return "glColor3ubv";
+ case EntryPoint::GLColor3ui:
+ return "glColor3ui";
+ case EntryPoint::GLColor3uiv:
+ return "glColor3uiv";
+ case EntryPoint::GLColor3us:
+ return "glColor3us";
+ case EntryPoint::GLColor3usv:
+ return "glColor3usv";
+ case EntryPoint::GLColor4b:
+ return "glColor4b";
+ case EntryPoint::GLColor4bv:
+ return "glColor4bv";
+ case EntryPoint::GLColor4d:
+ return "glColor4d";
+ case EntryPoint::GLColor4dv:
+ return "glColor4dv";
+ case EntryPoint::GLColor4f:
+ return "glColor4f";
+ case EntryPoint::GLColor4fv:
+ return "glColor4fv";
+ case EntryPoint::GLColor4i:
+ return "glColor4i";
+ case EntryPoint::GLColor4iv:
+ return "glColor4iv";
+ case EntryPoint::GLColor4s:
+ return "glColor4s";
+ case EntryPoint::GLColor4sv:
+ return "glColor4sv";
+ case EntryPoint::GLColor4ub:
+ return "glColor4ub";
+ case EntryPoint::GLColor4ubv:
+ return "glColor4ubv";
+ case EntryPoint::GLColor4ui:
+ return "glColor4ui";
+ case EntryPoint::GLColor4uiv:
+ return "glColor4uiv";
+ case EntryPoint::GLColor4us:
+ return "glColor4us";
+ case EntryPoint::GLColor4usv:
+ return "glColor4usv";
+ case EntryPoint::GLColor4x:
+ return "glColor4x";
+ case EntryPoint::GLColorMask:
+ return "glColorMask";
+ case EntryPoint::GLColorMaski:
+ return "glColorMaski";
+ case EntryPoint::GLColorMaskiEXT:
+ return "glColorMaskiEXT";
+ case EntryPoint::GLColorMaskiOES:
+ return "glColorMaskiOES";
+ case EntryPoint::GLColorMaterial:
+ return "glColorMaterial";
+ case EntryPoint::GLColorP3ui:
+ return "glColorP3ui";
+ case EntryPoint::GLColorP3uiv:
+ return "glColorP3uiv";
+ case EntryPoint::GLColorP4ui:
+ return "glColorP4ui";
+ case EntryPoint::GLColorP4uiv:
+ return "glColorP4uiv";
+ case EntryPoint::GLColorPointer:
+ return "glColorPointer";
+ case EntryPoint::GLCompileShader:
+ return "glCompileShader";
+ case EntryPoint::GLCompressedCopyTextureCHROMIUM:
+ return "glCompressedCopyTextureCHROMIUM";
+ case EntryPoint::GLCompressedTexImage1D:
+ return "glCompressedTexImage1D";
+ case EntryPoint::GLCompressedTexImage2D:
+ return "glCompressedTexImage2D";
+ case EntryPoint::GLCompressedTexImage2DRobustANGLE:
+ return "glCompressedTexImage2DRobustANGLE";
+ case EntryPoint::GLCompressedTexImage3D:
+ return "glCompressedTexImage3D";
+ case EntryPoint::GLCompressedTexImage3DOES:
+ return "glCompressedTexImage3DOES";
+ case EntryPoint::GLCompressedTexImage3DRobustANGLE:
+ return "glCompressedTexImage3DRobustANGLE";
+ case EntryPoint::GLCompressedTexSubImage1D:
+ return "glCompressedTexSubImage1D";
+ case EntryPoint::GLCompressedTexSubImage2D:
+ return "glCompressedTexSubImage2D";
+ case EntryPoint::GLCompressedTexSubImage2DRobustANGLE:
+ return "glCompressedTexSubImage2DRobustANGLE";
+ case EntryPoint::GLCompressedTexSubImage3D:
+ return "glCompressedTexSubImage3D";
+ case EntryPoint::GLCompressedTexSubImage3DOES:
+ return "glCompressedTexSubImage3DOES";
+ case EntryPoint::GLCompressedTexSubImage3DRobustANGLE:
+ return "glCompressedTexSubImage3DRobustANGLE";
+ case EntryPoint::GLCompressedTextureSubImage1D:
+ return "glCompressedTextureSubImage1D";
+ case EntryPoint::GLCompressedTextureSubImage2D:
+ return "glCompressedTextureSubImage2D";
+ case EntryPoint::GLCompressedTextureSubImage3D:
+ return "glCompressedTextureSubImage3D";
+ case EntryPoint::GLCopyBufferSubData:
+ return "glCopyBufferSubData";
+ case EntryPoint::GLCopyImageSubData:
+ return "glCopyImageSubData";
+ case EntryPoint::GLCopyImageSubDataEXT:
+ return "glCopyImageSubDataEXT";
+ case EntryPoint::GLCopyImageSubDataOES:
+ return "glCopyImageSubDataOES";
+ case EntryPoint::GLCopyNamedBufferSubData:
+ return "glCopyNamedBufferSubData";
+ case EntryPoint::GLCopyPixels:
+ return "glCopyPixels";
+ case EntryPoint::GLCopySubTexture3DANGLE:
+ return "glCopySubTexture3DANGLE";
+ case EntryPoint::GLCopySubTextureCHROMIUM:
+ return "glCopySubTextureCHROMIUM";
+ case EntryPoint::GLCopyTexImage1D:
+ return "glCopyTexImage1D";
+ case EntryPoint::GLCopyTexImage2D:
+ return "glCopyTexImage2D";
+ case EntryPoint::GLCopyTexSubImage1D:
+ return "glCopyTexSubImage1D";
+ case EntryPoint::GLCopyTexSubImage2D:
+ return "glCopyTexSubImage2D";
+ case EntryPoint::GLCopyTexSubImage3D:
+ return "glCopyTexSubImage3D";
+ case EntryPoint::GLCopyTexSubImage3DOES:
+ return "glCopyTexSubImage3DOES";
+ case EntryPoint::GLCopyTexture3DANGLE:
+ return "glCopyTexture3DANGLE";
+ case EntryPoint::GLCopyTextureCHROMIUM:
+ return "glCopyTextureCHROMIUM";
+ case EntryPoint::GLCopyTextureSubImage1D:
+ return "glCopyTextureSubImage1D";
+ case EntryPoint::GLCopyTextureSubImage2D:
+ return "glCopyTextureSubImage2D";
+ case EntryPoint::GLCopyTextureSubImage3D:
+ return "glCopyTextureSubImage3D";
+ case EntryPoint::GLCoverageModulationCHROMIUM:
+ return "glCoverageModulationCHROMIUM";
+ case EntryPoint::GLCreateBuffers:
+ return "glCreateBuffers";
+ case EntryPoint::GLCreateFramebuffers:
+ return "glCreateFramebuffers";
+ case EntryPoint::GLCreateMemoryObjectsEXT:
+ return "glCreateMemoryObjectsEXT";
+ case EntryPoint::GLCreateProgram:
+ return "glCreateProgram";
+ case EntryPoint::GLCreateProgramPipelines:
+ return "glCreateProgramPipelines";
+ case EntryPoint::GLCreateQueries:
+ return "glCreateQueries";
+ case EntryPoint::GLCreateRenderbuffers:
+ return "glCreateRenderbuffers";
+ case EntryPoint::GLCreateSamplers:
+ return "glCreateSamplers";
+ case EntryPoint::GLCreateShader:
+ return "glCreateShader";
+ case EntryPoint::GLCreateShaderProgramv:
+ return "glCreateShaderProgramv";
+ case EntryPoint::GLCreateShaderProgramvEXT:
+ return "glCreateShaderProgramvEXT";
+ case EntryPoint::GLCreateTextures:
+ return "glCreateTextures";
+ case EntryPoint::GLCreateTransformFeedbacks:
+ return "glCreateTransformFeedbacks";
+ case EntryPoint::GLCreateVertexArrays:
+ return "glCreateVertexArrays";
+ case EntryPoint::GLCullFace:
+ return "glCullFace";
+ case EntryPoint::GLCurrentPaletteMatrixOES:
+ return "glCurrentPaletteMatrixOES";
+ case EntryPoint::GLDebugMessageCallback:
+ return "glDebugMessageCallback";
+ case EntryPoint::GLDebugMessageCallbackKHR:
+ return "glDebugMessageCallbackKHR";
+ case EntryPoint::GLDebugMessageControl:
+ return "glDebugMessageControl";
+ case EntryPoint::GLDebugMessageControlKHR:
+ return "glDebugMessageControlKHR";
+ case EntryPoint::GLDebugMessageInsert:
+ return "glDebugMessageInsert";
+ case EntryPoint::GLDebugMessageInsertKHR:
+ return "glDebugMessageInsertKHR";
+ case EntryPoint::GLDeleteBuffers:
+ return "glDeleteBuffers";
+ case EntryPoint::GLDeleteFencesNV:
+ return "glDeleteFencesNV";
+ case EntryPoint::GLDeleteFramebuffers:
+ return "glDeleteFramebuffers";
+ case EntryPoint::GLDeleteFramebuffersOES:
+ return "glDeleteFramebuffersOES";
+ case EntryPoint::GLDeleteLists:
+ return "glDeleteLists";
+ case EntryPoint::GLDeleteMemoryObjectsEXT:
+ return "glDeleteMemoryObjectsEXT";
+ case EntryPoint::GLDeletePerfMonitorsAMD:
+ return "glDeletePerfMonitorsAMD";
+ case EntryPoint::GLDeleteProgram:
+ return "glDeleteProgram";
+ case EntryPoint::GLDeleteProgramPipelines:
+ return "glDeleteProgramPipelines";
+ case EntryPoint::GLDeleteProgramPipelinesEXT:
+ return "glDeleteProgramPipelinesEXT";
+ case EntryPoint::GLDeleteQueries:
+ return "glDeleteQueries";
+ case EntryPoint::GLDeleteQueriesEXT:
+ return "glDeleteQueriesEXT";
+ case EntryPoint::GLDeleteRenderbuffers:
+ return "glDeleteRenderbuffers";
+ case EntryPoint::GLDeleteRenderbuffersOES:
+ return "glDeleteRenderbuffersOES";
+ case EntryPoint::GLDeleteSamplers:
+ return "glDeleteSamplers";
+ case EntryPoint::GLDeleteSemaphoresEXT:
+ return "glDeleteSemaphoresEXT";
+ case EntryPoint::GLDeleteShader:
+ return "glDeleteShader";
+ case EntryPoint::GLDeleteSync:
+ return "glDeleteSync";
+ case EntryPoint::GLDeleteTextures:
+ return "glDeleteTextures";
+ case EntryPoint::GLDeleteTransformFeedbacks:
+ return "glDeleteTransformFeedbacks";
+ case EntryPoint::GLDeleteVertexArrays:
+ return "glDeleteVertexArrays";
+ case EntryPoint::GLDeleteVertexArraysOES:
+ return "glDeleteVertexArraysOES";
+ case EntryPoint::GLDepthFunc:
+ return "glDepthFunc";
+ case EntryPoint::GLDepthMask:
+ return "glDepthMask";
+ case EntryPoint::GLDepthRange:
+ return "glDepthRange";
+ case EntryPoint::GLDepthRangeArrayv:
+ return "glDepthRangeArrayv";
+ case EntryPoint::GLDepthRangeIndexed:
+ return "glDepthRangeIndexed";
+ case EntryPoint::GLDepthRangef:
+ return "glDepthRangef";
+ case EntryPoint::GLDepthRangex:
+ return "glDepthRangex";
+ case EntryPoint::GLDetachShader:
+ return "glDetachShader";
+ case EntryPoint::GLDisable:
+ return "glDisable";
+ case EntryPoint::GLDisableClientState:
+ return "glDisableClientState";
+ case EntryPoint::GLDisableExtensionANGLE:
+ return "glDisableExtensionANGLE";
+ case EntryPoint::GLDisableVertexArrayAttrib:
+ return "glDisableVertexArrayAttrib";
+ case EntryPoint::GLDisableVertexAttribArray:
+ return "glDisableVertexAttribArray";
+ case EntryPoint::GLDisablei:
+ return "glDisablei";
+ case EntryPoint::GLDisableiEXT:
+ return "glDisableiEXT";
+ case EntryPoint::GLDisableiOES:
+ return "glDisableiOES";
+ case EntryPoint::GLDiscardFramebufferEXT:
+ return "glDiscardFramebufferEXT";
+ case EntryPoint::GLDispatchCompute:
+ return "glDispatchCompute";
+ case EntryPoint::GLDispatchComputeIndirect:
+ return "glDispatchComputeIndirect";
+ case EntryPoint::GLDrawArrays:
+ return "glDrawArrays";
+ case EntryPoint::GLDrawArraysIndirect:
+ return "glDrawArraysIndirect";
+ case EntryPoint::GLDrawArraysInstanced:
+ return "glDrawArraysInstanced";
+ case EntryPoint::GLDrawArraysInstancedANGLE:
+ return "glDrawArraysInstancedANGLE";
+ case EntryPoint::GLDrawArraysInstancedBaseInstance:
+ return "glDrawArraysInstancedBaseInstance";
+ case EntryPoint::GLDrawArraysInstancedBaseInstanceANGLE:
+ return "glDrawArraysInstancedBaseInstanceANGLE";
+ case EntryPoint::GLDrawArraysInstancedBaseInstanceEXT:
+ return "glDrawArraysInstancedBaseInstanceEXT";
+ case EntryPoint::GLDrawArraysInstancedEXT:
+ return "glDrawArraysInstancedEXT";
+ case EntryPoint::GLDrawBuffer:
+ return "glDrawBuffer";
+ case EntryPoint::GLDrawBuffers:
+ return "glDrawBuffers";
+ case EntryPoint::GLDrawBuffersEXT:
+ return "glDrawBuffersEXT";
+ case EntryPoint::GLDrawElements:
+ return "glDrawElements";
+ case EntryPoint::GLDrawElementsBaseVertex:
+ return "glDrawElementsBaseVertex";
+ case EntryPoint::GLDrawElementsBaseVertexEXT:
+ return "glDrawElementsBaseVertexEXT";
+ case EntryPoint::GLDrawElementsBaseVertexOES:
+ return "glDrawElementsBaseVertexOES";
+ case EntryPoint::GLDrawElementsIndirect:
+ return "glDrawElementsIndirect";
+ case EntryPoint::GLDrawElementsInstanced:
+ return "glDrawElementsInstanced";
+ case EntryPoint::GLDrawElementsInstancedANGLE:
+ return "glDrawElementsInstancedANGLE";
+ case EntryPoint::GLDrawElementsInstancedBaseInstance:
+ return "glDrawElementsInstancedBaseInstance";
+ case EntryPoint::GLDrawElementsInstancedBaseInstanceEXT:
+ return "glDrawElementsInstancedBaseInstanceEXT";
+ case EntryPoint::GLDrawElementsInstancedBaseVertex:
+ return "glDrawElementsInstancedBaseVertex";
+ case EntryPoint::GLDrawElementsInstancedBaseVertexBaseInstance:
+ return "glDrawElementsInstancedBaseVertexBaseInstance";
+ case EntryPoint::GLDrawElementsInstancedBaseVertexBaseInstanceANGLE:
+ return "glDrawElementsInstancedBaseVertexBaseInstanceANGLE";
+ case EntryPoint::GLDrawElementsInstancedBaseVertexBaseInstanceEXT:
+ return "glDrawElementsInstancedBaseVertexBaseInstanceEXT";
+ case EntryPoint::GLDrawElementsInstancedBaseVertexEXT:
+ return "glDrawElementsInstancedBaseVertexEXT";
+ case EntryPoint::GLDrawElementsInstancedBaseVertexOES:
+ return "glDrawElementsInstancedBaseVertexOES";
+ case EntryPoint::GLDrawElementsInstancedEXT:
+ return "glDrawElementsInstancedEXT";
+ case EntryPoint::GLDrawPixels:
+ return "glDrawPixels";
+ case EntryPoint::GLDrawRangeElements:
+ return "glDrawRangeElements";
+ case EntryPoint::GLDrawRangeElementsBaseVertex:
+ return "glDrawRangeElementsBaseVertex";
+ case EntryPoint::GLDrawRangeElementsBaseVertexEXT:
+ return "glDrawRangeElementsBaseVertexEXT";
+ case EntryPoint::GLDrawRangeElementsBaseVertexOES:
+ return "glDrawRangeElementsBaseVertexOES";
+ case EntryPoint::GLDrawTexfOES:
+ return "glDrawTexfOES";
+ case EntryPoint::GLDrawTexfvOES:
+ return "glDrawTexfvOES";
+ case EntryPoint::GLDrawTexiOES:
+ return "glDrawTexiOES";
+ case EntryPoint::GLDrawTexivOES:
+ return "glDrawTexivOES";
+ case EntryPoint::GLDrawTexsOES:
+ return "glDrawTexsOES";
+ case EntryPoint::GLDrawTexsvOES:
+ return "glDrawTexsvOES";
+ case EntryPoint::GLDrawTexxOES:
+ return "glDrawTexxOES";
+ case EntryPoint::GLDrawTexxvOES:
+ return "glDrawTexxvOES";
+ case EntryPoint::GLDrawTransformFeedback:
+ return "glDrawTransformFeedback";
+ case EntryPoint::GLDrawTransformFeedbackInstanced:
+ return "glDrawTransformFeedbackInstanced";
+ case EntryPoint::GLDrawTransformFeedbackStream:
+ return "glDrawTransformFeedbackStream";
+ case EntryPoint::GLDrawTransformFeedbackStreamInstanced:
+ return "glDrawTransformFeedbackStreamInstanced";
+ case EntryPoint::GLEGLImageTargetRenderbufferStorageOES:
+ return "glEGLImageTargetRenderbufferStorageOES";
+ case EntryPoint::GLEGLImageTargetTexStorageEXT:
+ return "glEGLImageTargetTexStorageEXT";
+ case EntryPoint::GLEGLImageTargetTexture2DOES:
+ return "glEGLImageTargetTexture2DOES";
+ case EntryPoint::GLEGLImageTargetTextureStorageEXT:
+ return "glEGLImageTargetTextureStorageEXT";
+ case EntryPoint::GLEdgeFlag:
+ return "glEdgeFlag";
+ case EntryPoint::GLEdgeFlagPointer:
+ return "glEdgeFlagPointer";
+ case EntryPoint::GLEdgeFlagv:
+ return "glEdgeFlagv";
+ case EntryPoint::GLEnable:
+ return "glEnable";
+ case EntryPoint::GLEnableClientState:
+ return "glEnableClientState";
+ case EntryPoint::GLEnableVertexArrayAttrib:
+ return "glEnableVertexArrayAttrib";
+ case EntryPoint::GLEnableVertexAttribArray:
+ return "glEnableVertexAttribArray";
+ case EntryPoint::GLEnablei:
+ return "glEnablei";
+ case EntryPoint::GLEnableiEXT:
+ return "glEnableiEXT";
+ case EntryPoint::GLEnableiOES:
+ return "glEnableiOES";
+ case EntryPoint::GLEnd:
+ return "glEnd";
+ case EntryPoint::GLEndConditionalRender:
+ return "glEndConditionalRender";
+ case EntryPoint::GLEndList:
+ return "glEndList";
+ case EntryPoint::GLEndPerfMonitorAMD:
+ return "glEndPerfMonitorAMD";
+ case EntryPoint::GLEndPixelLocalStorageANGLE:
+ return "glEndPixelLocalStorageANGLE";
+ case EntryPoint::GLEndQuery:
+ return "glEndQuery";
+ case EntryPoint::GLEndQueryEXT:
+ return "glEndQueryEXT";
+ case EntryPoint::GLEndQueryIndexed:
+ return "glEndQueryIndexed";
+ case EntryPoint::GLEndTransformFeedback:
+ return "glEndTransformFeedback";
+ case EntryPoint::GLEvalCoord1d:
+ return "glEvalCoord1d";
+ case EntryPoint::GLEvalCoord1dv:
+ return "glEvalCoord1dv";
+ case EntryPoint::GLEvalCoord1f:
+ return "glEvalCoord1f";
+ case EntryPoint::GLEvalCoord1fv:
+ return "glEvalCoord1fv";
+ case EntryPoint::GLEvalCoord2d:
+ return "glEvalCoord2d";
+ case EntryPoint::GLEvalCoord2dv:
+ return "glEvalCoord2dv";
+ case EntryPoint::GLEvalCoord2f:
+ return "glEvalCoord2f";
+ case EntryPoint::GLEvalCoord2fv:
+ return "glEvalCoord2fv";
+ case EntryPoint::GLEvalMesh1:
+ return "glEvalMesh1";
+ case EntryPoint::GLEvalMesh2:
+ return "glEvalMesh2";
+ case EntryPoint::GLEvalPoint1:
+ return "glEvalPoint1";
+ case EntryPoint::GLEvalPoint2:
+ return "glEvalPoint2";
+ case EntryPoint::GLFeedbackBuffer:
+ return "glFeedbackBuffer";
+ case EntryPoint::GLFenceSync:
+ return "glFenceSync";
+ case EntryPoint::GLFinish:
+ return "glFinish";
+ case EntryPoint::GLFinishFenceNV:
+ return "glFinishFenceNV";
+ case EntryPoint::GLFlush:
+ return "glFlush";
+ case EntryPoint::GLFlushMappedBufferRange:
+ return "glFlushMappedBufferRange";
+ case EntryPoint::GLFlushMappedBufferRangeEXT:
+ return "glFlushMappedBufferRangeEXT";
+ case EntryPoint::GLFlushMappedNamedBufferRange:
+ return "glFlushMappedNamedBufferRange";
+ case EntryPoint::GLFogCoordPointer:
+ return "glFogCoordPointer";
+ case EntryPoint::GLFogCoordd:
+ return "glFogCoordd";
+ case EntryPoint::GLFogCoorddv:
+ return "glFogCoorddv";
+ case EntryPoint::GLFogCoordf:
+ return "glFogCoordf";
+ case EntryPoint::GLFogCoordfv:
+ return "glFogCoordfv";
+ case EntryPoint::GLFogf:
+ return "glFogf";
+ case EntryPoint::GLFogfv:
+ return "glFogfv";
+ case EntryPoint::GLFogi:
+ return "glFogi";
+ case EntryPoint::GLFogiv:
+ return "glFogiv";
+ case EntryPoint::GLFogx:
+ return "glFogx";
+ case EntryPoint::GLFogxv:
+ return "glFogxv";
+ case EntryPoint::GLFramebufferFetchBarrierEXT:
+ return "glFramebufferFetchBarrierEXT";
+ case EntryPoint::GLFramebufferMemorylessPixelLocalStorageANGLE:
+ return "glFramebufferMemorylessPixelLocalStorageANGLE";
+ case EntryPoint::GLFramebufferParameteri:
+ return "glFramebufferParameteri";
+ case EntryPoint::GLFramebufferParameteriMESA:
+ return "glFramebufferParameteriMESA";
+ case EntryPoint::GLFramebufferRenderbuffer:
+ return "glFramebufferRenderbuffer";
+ case EntryPoint::GLFramebufferRenderbufferOES:
+ return "glFramebufferRenderbufferOES";
+ case EntryPoint::GLFramebufferTexture:
+ return "glFramebufferTexture";
+ case EntryPoint::GLFramebufferTexture1D:
+ return "glFramebufferTexture1D";
+ case EntryPoint::GLFramebufferTexture2D:
+ return "glFramebufferTexture2D";
+ case EntryPoint::GLFramebufferTexture2DMultisampleEXT:
+ return "glFramebufferTexture2DMultisampleEXT";
+ case EntryPoint::GLFramebufferTexture2DOES:
+ return "glFramebufferTexture2DOES";
+ case EntryPoint::GLFramebufferTexture3D:
+ return "glFramebufferTexture3D";
+ case EntryPoint::GLFramebufferTexture3DOES:
+ return "glFramebufferTexture3DOES";
+ case EntryPoint::GLFramebufferTextureEXT:
+ return "glFramebufferTextureEXT";
+ case EntryPoint::GLFramebufferTextureLayer:
+ return "glFramebufferTextureLayer";
+ case EntryPoint::GLFramebufferTextureMultiviewOVR:
+ return "glFramebufferTextureMultiviewOVR";
+ case EntryPoint::GLFramebufferTextureOES:
+ return "glFramebufferTextureOES";
+ case EntryPoint::GLFramebufferTexturePixelLocalStorageANGLE:
+ return "glFramebufferTexturePixelLocalStorageANGLE";
+ case EntryPoint::GLFrontFace:
+ return "glFrontFace";
+ case EntryPoint::GLFrustum:
+ return "glFrustum";
+ case EntryPoint::GLFrustumf:
+ return "glFrustumf";
+ case EntryPoint::GLFrustumx:
+ return "glFrustumx";
+ case EntryPoint::GLGenBuffers:
+ return "glGenBuffers";
+ case EntryPoint::GLGenFencesNV:
+ return "glGenFencesNV";
+ case EntryPoint::GLGenFramebuffers:
+ return "glGenFramebuffers";
+ case EntryPoint::GLGenFramebuffersOES:
+ return "glGenFramebuffersOES";
+ case EntryPoint::GLGenLists:
+ return "glGenLists";
+ case EntryPoint::GLGenPerfMonitorsAMD:
+ return "glGenPerfMonitorsAMD";
+ case EntryPoint::GLGenProgramPipelines:
+ return "glGenProgramPipelines";
+ case EntryPoint::GLGenProgramPipelinesEXT:
+ return "glGenProgramPipelinesEXT";
+ case EntryPoint::GLGenQueries:
+ return "glGenQueries";
+ case EntryPoint::GLGenQueriesEXT:
+ return "glGenQueriesEXT";
+ case EntryPoint::GLGenRenderbuffers:
+ return "glGenRenderbuffers";
+ case EntryPoint::GLGenRenderbuffersOES:
+ return "glGenRenderbuffersOES";
+ case EntryPoint::GLGenSamplers:
+ return "glGenSamplers";
+ case EntryPoint::GLGenSemaphoresEXT:
+ return "glGenSemaphoresEXT";
+ case EntryPoint::GLGenTextures:
+ return "glGenTextures";
+ case EntryPoint::GLGenTransformFeedbacks:
+ return "glGenTransformFeedbacks";
+ case EntryPoint::GLGenVertexArrays:
+ return "glGenVertexArrays";
+ case EntryPoint::GLGenVertexArraysOES:
+ return "glGenVertexArraysOES";
+ case EntryPoint::GLGenerateMipmap:
+ return "glGenerateMipmap";
+ case EntryPoint::GLGenerateMipmapOES:
+ return "glGenerateMipmapOES";
+ case EntryPoint::GLGenerateTextureMipmap:
+ return "glGenerateTextureMipmap";
+ case EntryPoint::GLGetActiveAtomicCounterBufferiv:
+ return "glGetActiveAtomicCounterBufferiv";
+ case EntryPoint::GLGetActiveAttrib:
+ return "glGetActiveAttrib";
+ case EntryPoint::GLGetActiveSubroutineName:
+ return "glGetActiveSubroutineName";
+ case EntryPoint::GLGetActiveSubroutineUniformName:
+ return "glGetActiveSubroutineUniformName";
+ case EntryPoint::GLGetActiveSubroutineUniformiv:
+ return "glGetActiveSubroutineUniformiv";
+ case EntryPoint::GLGetActiveUniform:
+ return "glGetActiveUniform";
+ case EntryPoint::GLGetActiveUniformBlockName:
+ return "glGetActiveUniformBlockName";
+ case EntryPoint::GLGetActiveUniformBlockiv:
+ return "glGetActiveUniformBlockiv";
+ case EntryPoint::GLGetActiveUniformBlockivRobustANGLE:
+ return "glGetActiveUniformBlockivRobustANGLE";
+ case EntryPoint::GLGetActiveUniformName:
+ return "glGetActiveUniformName";
+ case EntryPoint::GLGetActiveUniformsiv:
+ return "glGetActiveUniformsiv";
+ case EntryPoint::GLGetAttachedShaders:
+ return "glGetAttachedShaders";
+ case EntryPoint::GLGetAttribLocation:
+ return "glGetAttribLocation";
+ case EntryPoint::GLGetBooleani_v:
+ return "glGetBooleani_v";
+ case EntryPoint::GLGetBooleani_vRobustANGLE:
+ return "glGetBooleani_vRobustANGLE";
+ case EntryPoint::GLGetBooleanv:
+ return "glGetBooleanv";
+ case EntryPoint::GLGetBooleanvRobustANGLE:
+ return "glGetBooleanvRobustANGLE";
+ case EntryPoint::GLGetBufferParameteri64v:
+ return "glGetBufferParameteri64v";
+ case EntryPoint::GLGetBufferParameteri64vRobustANGLE:
+ return "glGetBufferParameteri64vRobustANGLE";
+ case EntryPoint::GLGetBufferParameteriv:
+ return "glGetBufferParameteriv";
+ case EntryPoint::GLGetBufferParameterivRobustANGLE:
+ return "glGetBufferParameterivRobustANGLE";
+ case EntryPoint::GLGetBufferPointerv:
+ return "glGetBufferPointerv";
+ case EntryPoint::GLGetBufferPointervOES:
+ return "glGetBufferPointervOES";
+ case EntryPoint::GLGetBufferPointervRobustANGLE:
+ return "glGetBufferPointervRobustANGLE";
+ case EntryPoint::GLGetBufferSubData:
+ return "glGetBufferSubData";
+ case EntryPoint::GLGetClipPlane:
+ return "glGetClipPlane";
+ case EntryPoint::GLGetClipPlanef:
+ return "glGetClipPlanef";
+ case EntryPoint::GLGetClipPlanex:
+ return "glGetClipPlanex";
+ case EntryPoint::GLGetCompressedTexImage:
+ return "glGetCompressedTexImage";
+ case EntryPoint::GLGetCompressedTexImageANGLE:
+ return "glGetCompressedTexImageANGLE";
+ case EntryPoint::GLGetCompressedTextureImage:
+ return "glGetCompressedTextureImage";
+ case EntryPoint::GLGetCompressedTextureSubImage:
+ return "glGetCompressedTextureSubImage";
+ case EntryPoint::GLGetDebugMessageLog:
+ return "glGetDebugMessageLog";
+ case EntryPoint::GLGetDebugMessageLogKHR:
+ return "glGetDebugMessageLogKHR";
+ case EntryPoint::GLGetDoublei_v:
+ return "glGetDoublei_v";
+ case EntryPoint::GLGetDoublev:
+ return "glGetDoublev";
+ case EntryPoint::GLGetError:
+ return "glGetError";
+ case EntryPoint::GLGetFenceivNV:
+ return "glGetFenceivNV";
+ case EntryPoint::GLGetFixedv:
+ return "glGetFixedv";
+ case EntryPoint::GLGetFloati_v:
+ return "glGetFloati_v";
+ case EntryPoint::GLGetFloatv:
+ return "glGetFloatv";
+ case EntryPoint::GLGetFloatvRobustANGLE:
+ return "glGetFloatvRobustANGLE";
+ case EntryPoint::GLGetFragDataIndex:
+ return "glGetFragDataIndex";
+ case EntryPoint::GLGetFragDataIndexEXT:
+ return "glGetFragDataIndexEXT";
+ case EntryPoint::GLGetFragDataLocation:
+ return "glGetFragDataLocation";
+ case EntryPoint::GLGetFramebufferAttachmentParameteriv:
+ return "glGetFramebufferAttachmentParameteriv";
+ case EntryPoint::GLGetFramebufferAttachmentParameterivOES:
+ return "glGetFramebufferAttachmentParameterivOES";
+ case EntryPoint::GLGetFramebufferAttachmentParameterivRobustANGLE:
+ return "glGetFramebufferAttachmentParameterivRobustANGLE";
+ case EntryPoint::GLGetFramebufferParameteriv:
+ return "glGetFramebufferParameteriv";
+ case EntryPoint::GLGetFramebufferParameterivMESA:
+ return "glGetFramebufferParameterivMESA";
+ case EntryPoint::GLGetFramebufferParameterivRobustANGLE:
+ return "glGetFramebufferParameterivRobustANGLE";
+ case EntryPoint::GLGetGraphicsResetStatus:
+ return "glGetGraphicsResetStatus";
+ case EntryPoint::GLGetGraphicsResetStatusEXT:
+ return "glGetGraphicsResetStatusEXT";
+ case EntryPoint::GLGetInteger64i_v:
+ return "glGetInteger64i_v";
+ case EntryPoint::GLGetInteger64i_vRobustANGLE:
+ return "glGetInteger64i_vRobustANGLE";
+ case EntryPoint::GLGetInteger64v:
+ return "glGetInteger64v";
+ case EntryPoint::GLGetInteger64vEXT:
+ return "glGetInteger64vEXT";
+ case EntryPoint::GLGetInteger64vRobustANGLE:
+ return "glGetInteger64vRobustANGLE";
+ case EntryPoint::GLGetIntegeri_v:
+ return "glGetIntegeri_v";
+ case EntryPoint::GLGetIntegeri_vRobustANGLE:
+ return "glGetIntegeri_vRobustANGLE";
+ case EntryPoint::GLGetIntegerv:
+ return "glGetIntegerv";
+ case EntryPoint::GLGetIntegervRobustANGLE:
+ return "glGetIntegervRobustANGLE";
+ case EntryPoint::GLGetInternalformati64v:
+ return "glGetInternalformati64v";
+ case EntryPoint::GLGetInternalformativ:
+ return "glGetInternalformativ";
+ case EntryPoint::GLGetInternalformativRobustANGLE:
+ return "glGetInternalformativRobustANGLE";
+ case EntryPoint::GLGetLightfv:
+ return "glGetLightfv";
+ case EntryPoint::GLGetLightiv:
+ return "glGetLightiv";
+ case EntryPoint::GLGetLightxv:
+ return "glGetLightxv";
+ case EntryPoint::GLGetMapdv:
+ return "glGetMapdv";
+ case EntryPoint::GLGetMapfv:
+ return "glGetMapfv";
+ case EntryPoint::GLGetMapiv:
+ return "glGetMapiv";
+ case EntryPoint::GLGetMaterialfv:
+ return "glGetMaterialfv";
+ case EntryPoint::GLGetMaterialiv:
+ return "glGetMaterialiv";
+ case EntryPoint::GLGetMaterialxv:
+ return "glGetMaterialxv";
+ case EntryPoint::GLGetMemoryObjectParameterivEXT:
+ return "glGetMemoryObjectParameterivEXT";
+ case EntryPoint::GLGetMultisamplefv:
+ return "glGetMultisamplefv";
+ case EntryPoint::GLGetMultisamplefvANGLE:
+ return "glGetMultisamplefvANGLE";
+ case EntryPoint::GLGetMultisamplefvRobustANGLE:
+ return "glGetMultisamplefvRobustANGLE";
+ case EntryPoint::GLGetNamedBufferParameteri64v:
+ return "glGetNamedBufferParameteri64v";
+ case EntryPoint::GLGetNamedBufferParameteriv:
+ return "glGetNamedBufferParameteriv";
+ case EntryPoint::GLGetNamedBufferPointerv:
+ return "glGetNamedBufferPointerv";
+ case EntryPoint::GLGetNamedBufferSubData:
+ return "glGetNamedBufferSubData";
+ case EntryPoint::GLGetNamedFramebufferAttachmentParameteriv:
+ return "glGetNamedFramebufferAttachmentParameteriv";
+ case EntryPoint::GLGetNamedFramebufferParameteriv:
+ return "glGetNamedFramebufferParameteriv";
+ case EntryPoint::GLGetNamedRenderbufferParameteriv:
+ return "glGetNamedRenderbufferParameteriv";
+ case EntryPoint::GLGetObjectLabel:
+ return "glGetObjectLabel";
+ case EntryPoint::GLGetObjectLabelEXT:
+ return "glGetObjectLabelEXT";
+ case EntryPoint::GLGetObjectLabelKHR:
+ return "glGetObjectLabelKHR";
+ case EntryPoint::GLGetObjectPtrLabel:
+ return "glGetObjectPtrLabel";
+ case EntryPoint::GLGetObjectPtrLabelKHR:
+ return "glGetObjectPtrLabelKHR";
+ case EntryPoint::GLGetPerfMonitorCounterDataAMD:
+ return "glGetPerfMonitorCounterDataAMD";
+ case EntryPoint::GLGetPerfMonitorCounterInfoAMD:
+ return "glGetPerfMonitorCounterInfoAMD";
+ case EntryPoint::GLGetPerfMonitorCounterStringAMD:
+ return "glGetPerfMonitorCounterStringAMD";
+ case EntryPoint::GLGetPerfMonitorCountersAMD:
+ return "glGetPerfMonitorCountersAMD";
+ case EntryPoint::GLGetPerfMonitorGroupStringAMD:
+ return "glGetPerfMonitorGroupStringAMD";
+ case EntryPoint::GLGetPerfMonitorGroupsAMD:
+ return "glGetPerfMonitorGroupsAMD";
+ case EntryPoint::GLGetPixelMapfv:
+ return "glGetPixelMapfv";
+ case EntryPoint::GLGetPixelMapuiv:
+ return "glGetPixelMapuiv";
+ case EntryPoint::GLGetPixelMapusv:
+ return "glGetPixelMapusv";
+ case EntryPoint::GLGetPointerv:
+ return "glGetPointerv";
+ case EntryPoint::GLGetPointervKHR:
+ return "glGetPointervKHR";
+ case EntryPoint::GLGetPointervRobustANGLERobustANGLE:
+ return "glGetPointervRobustANGLERobustANGLE";
+ case EntryPoint::GLGetPolygonStipple:
+ return "glGetPolygonStipple";
+ case EntryPoint::GLGetProgramBinary:
+ return "glGetProgramBinary";
+ case EntryPoint::GLGetProgramBinaryOES:
+ return "glGetProgramBinaryOES";
+ case EntryPoint::GLGetProgramInfoLog:
+ return "glGetProgramInfoLog";
+ case EntryPoint::GLGetProgramInterfaceiv:
+ return "glGetProgramInterfaceiv";
+ case EntryPoint::GLGetProgramInterfaceivRobustANGLE:
+ return "glGetProgramInterfaceivRobustANGLE";
+ case EntryPoint::GLGetProgramPipelineInfoLog:
+ return "glGetProgramPipelineInfoLog";
+ case EntryPoint::GLGetProgramPipelineInfoLogEXT:
+ return "glGetProgramPipelineInfoLogEXT";
+ case EntryPoint::GLGetProgramPipelineiv:
+ return "glGetProgramPipelineiv";
+ case EntryPoint::GLGetProgramPipelineivEXT:
+ return "glGetProgramPipelineivEXT";
+ case EntryPoint::GLGetProgramResourceIndex:
+ return "glGetProgramResourceIndex";
+ case EntryPoint::GLGetProgramResourceLocation:
+ return "glGetProgramResourceLocation";
+ case EntryPoint::GLGetProgramResourceLocationIndex:
+ return "glGetProgramResourceLocationIndex";
+ case EntryPoint::GLGetProgramResourceLocationIndexEXT:
+ return "glGetProgramResourceLocationIndexEXT";
+ case EntryPoint::GLGetProgramResourceName:
+ return "glGetProgramResourceName";
+ case EntryPoint::GLGetProgramResourceiv:
+ return "glGetProgramResourceiv";
+ case EntryPoint::GLGetProgramStageiv:
+ return "glGetProgramStageiv";
+ case EntryPoint::GLGetProgramiv:
+ return "glGetProgramiv";
+ case EntryPoint::GLGetProgramivRobustANGLE:
+ return "glGetProgramivRobustANGLE";
+ case EntryPoint::GLGetQueryBufferObjecti64v:
+ return "glGetQueryBufferObjecti64v";
+ case EntryPoint::GLGetQueryBufferObjectiv:
+ return "glGetQueryBufferObjectiv";
+ case EntryPoint::GLGetQueryBufferObjectui64v:
+ return "glGetQueryBufferObjectui64v";
+ case EntryPoint::GLGetQueryBufferObjectuiv:
+ return "glGetQueryBufferObjectuiv";
+ case EntryPoint::GLGetQueryIndexediv:
+ return "glGetQueryIndexediv";
+ case EntryPoint::GLGetQueryObjecti64v:
+ return "glGetQueryObjecti64v";
+ case EntryPoint::GLGetQueryObjecti64vEXT:
+ return "glGetQueryObjecti64vEXT";
+ case EntryPoint::GLGetQueryObjecti64vRobustANGLE:
+ return "glGetQueryObjecti64vRobustANGLE";
+ case EntryPoint::GLGetQueryObjectiv:
+ return "glGetQueryObjectiv";
+ case EntryPoint::GLGetQueryObjectivEXT:
+ return "glGetQueryObjectivEXT";
+ case EntryPoint::GLGetQueryObjectivRobustANGLE:
+ return "glGetQueryObjectivRobustANGLE";
+ case EntryPoint::GLGetQueryObjectui64v:
+ return "glGetQueryObjectui64v";
+ case EntryPoint::GLGetQueryObjectui64vEXT:
+ return "glGetQueryObjectui64vEXT";
+ case EntryPoint::GLGetQueryObjectui64vRobustANGLE:
+ return "glGetQueryObjectui64vRobustANGLE";
+ case EntryPoint::GLGetQueryObjectuiv:
+ return "glGetQueryObjectuiv";
+ case EntryPoint::GLGetQueryObjectuivEXT:
+ return "glGetQueryObjectuivEXT";
+ case EntryPoint::GLGetQueryObjectuivRobustANGLE:
+ return "glGetQueryObjectuivRobustANGLE";
+ case EntryPoint::GLGetQueryiv:
+ return "glGetQueryiv";
+ case EntryPoint::GLGetQueryivEXT:
+ return "glGetQueryivEXT";
+ case EntryPoint::GLGetQueryivRobustANGLE:
+ return "glGetQueryivRobustANGLE";
+ case EntryPoint::GLGetRenderbufferImageANGLE:
+ return "glGetRenderbufferImageANGLE";
+ case EntryPoint::GLGetRenderbufferParameteriv:
+ return "glGetRenderbufferParameteriv";
+ case EntryPoint::GLGetRenderbufferParameterivOES:
+ return "glGetRenderbufferParameterivOES";
+ case EntryPoint::GLGetRenderbufferParameterivRobustANGLE:
+ return "glGetRenderbufferParameterivRobustANGLE";
+ case EntryPoint::GLGetSamplerParameterIiv:
+ return "glGetSamplerParameterIiv";
+ case EntryPoint::GLGetSamplerParameterIivEXT:
+ return "glGetSamplerParameterIivEXT";
+ case EntryPoint::GLGetSamplerParameterIivOES:
+ return "glGetSamplerParameterIivOES";
+ case EntryPoint::GLGetSamplerParameterIivRobustANGLE:
+ return "glGetSamplerParameterIivRobustANGLE";
+ case EntryPoint::GLGetSamplerParameterIuiv:
+ return "glGetSamplerParameterIuiv";
+ case EntryPoint::GLGetSamplerParameterIuivEXT:
+ return "glGetSamplerParameterIuivEXT";
+ case EntryPoint::GLGetSamplerParameterIuivOES:
+ return "glGetSamplerParameterIuivOES";
+ case EntryPoint::GLGetSamplerParameterIuivRobustANGLE:
+ return "glGetSamplerParameterIuivRobustANGLE";
+ case EntryPoint::GLGetSamplerParameterfv:
+ return "glGetSamplerParameterfv";
+ case EntryPoint::GLGetSamplerParameterfvRobustANGLE:
+ return "glGetSamplerParameterfvRobustANGLE";
+ case EntryPoint::GLGetSamplerParameteriv:
+ return "glGetSamplerParameteriv";
+ case EntryPoint::GLGetSamplerParameterivRobustANGLE:
+ return "glGetSamplerParameterivRobustANGLE";
+ case EntryPoint::GLGetSemaphoreParameterui64vEXT:
+ return "glGetSemaphoreParameterui64vEXT";
+ case EntryPoint::GLGetShaderInfoLog:
+ return "glGetShaderInfoLog";
+ case EntryPoint::GLGetShaderPrecisionFormat:
+ return "glGetShaderPrecisionFormat";
+ case EntryPoint::GLGetShaderSource:
+ return "glGetShaderSource";
+ case EntryPoint::GLGetShaderiv:
+ return "glGetShaderiv";
+ case EntryPoint::GLGetShaderivRobustANGLE:
+ return "glGetShaderivRobustANGLE";
+ case EntryPoint::GLGetString:
+ return "glGetString";
+ case EntryPoint::GLGetStringi:
+ return "glGetStringi";
+ case EntryPoint::GLGetSubroutineIndex:
+ return "glGetSubroutineIndex";
+ case EntryPoint::GLGetSubroutineUniformLocation:
+ return "glGetSubroutineUniformLocation";
+ case EntryPoint::GLGetSynciv:
+ return "glGetSynciv";
+ case EntryPoint::GLGetTexEnvfv:
+ return "glGetTexEnvfv";
+ case EntryPoint::GLGetTexEnviv:
+ return "glGetTexEnviv";
+ case EntryPoint::GLGetTexEnvxv:
+ return "glGetTexEnvxv";
+ case EntryPoint::GLGetTexGendv:
+ return "glGetTexGendv";
+ case EntryPoint::GLGetTexGenfv:
+ return "glGetTexGenfv";
+ case EntryPoint::GLGetTexGenfvOES:
+ return "glGetTexGenfvOES";
+ case EntryPoint::GLGetTexGeniv:
+ return "glGetTexGeniv";
+ case EntryPoint::GLGetTexGenivOES:
+ return "glGetTexGenivOES";
+ case EntryPoint::GLGetTexGenxvOES:
+ return "glGetTexGenxvOES";
+ case EntryPoint::GLGetTexImage:
+ return "glGetTexImage";
+ case EntryPoint::GLGetTexImageANGLE:
+ return "glGetTexImageANGLE";
+ case EntryPoint::GLGetTexLevelParameterfv:
+ return "glGetTexLevelParameterfv";
+ case EntryPoint::GLGetTexLevelParameterfvANGLE:
+ return "glGetTexLevelParameterfvANGLE";
+ case EntryPoint::GLGetTexLevelParameterfvRobustANGLE:
+ return "glGetTexLevelParameterfvRobustANGLE";
+ case EntryPoint::GLGetTexLevelParameteriv:
+ return "glGetTexLevelParameteriv";
+ case EntryPoint::GLGetTexLevelParameterivANGLE:
+ return "glGetTexLevelParameterivANGLE";
+ case EntryPoint::GLGetTexLevelParameterivRobustANGLE:
+ return "glGetTexLevelParameterivRobustANGLE";
+ case EntryPoint::GLGetTexParameterIiv:
+ return "glGetTexParameterIiv";
+ case EntryPoint::GLGetTexParameterIivEXT:
+ return "glGetTexParameterIivEXT";
+ case EntryPoint::GLGetTexParameterIivOES:
+ return "glGetTexParameterIivOES";
+ case EntryPoint::GLGetTexParameterIivRobustANGLE:
+ return "glGetTexParameterIivRobustANGLE";
+ case EntryPoint::GLGetTexParameterIuiv:
+ return "glGetTexParameterIuiv";
+ case EntryPoint::GLGetTexParameterIuivEXT:
+ return "glGetTexParameterIuivEXT";
+ case EntryPoint::GLGetTexParameterIuivOES:
+ return "glGetTexParameterIuivOES";
+ case EntryPoint::GLGetTexParameterIuivRobustANGLE:
+ return "glGetTexParameterIuivRobustANGLE";
+ case EntryPoint::GLGetTexParameterfv:
+ return "glGetTexParameterfv";
+ case EntryPoint::GLGetTexParameterfvRobustANGLE:
+ return "glGetTexParameterfvRobustANGLE";
+ case EntryPoint::GLGetTexParameteriv:
+ return "glGetTexParameteriv";
+ case EntryPoint::GLGetTexParameterivRobustANGLE:
+ return "glGetTexParameterivRobustANGLE";
+ case EntryPoint::GLGetTexParameterxv:
+ return "glGetTexParameterxv";
+ case EntryPoint::GLGetTextureImage:
+ return "glGetTextureImage";
+ case EntryPoint::GLGetTextureLevelParameterfv:
+ return "glGetTextureLevelParameterfv";
+ case EntryPoint::GLGetTextureLevelParameteriv:
+ return "glGetTextureLevelParameteriv";
+ case EntryPoint::GLGetTextureParameterIiv:
+ return "glGetTextureParameterIiv";
+ case EntryPoint::GLGetTextureParameterIuiv:
+ return "glGetTextureParameterIuiv";
+ case EntryPoint::GLGetTextureParameterfv:
+ return "glGetTextureParameterfv";
+ case EntryPoint::GLGetTextureParameteriv:
+ return "glGetTextureParameteriv";
+ case EntryPoint::GLGetTextureSubImage:
+ return "glGetTextureSubImage";
+ case EntryPoint::GLGetTransformFeedbackVarying:
+ return "glGetTransformFeedbackVarying";
+ case EntryPoint::GLGetTransformFeedbacki64_v:
+ return "glGetTransformFeedbacki64_v";
+ case EntryPoint::GLGetTransformFeedbacki_v:
+ return "glGetTransformFeedbacki_v";
+ case EntryPoint::GLGetTransformFeedbackiv:
+ return "glGetTransformFeedbackiv";
+ case EntryPoint::GLGetTranslatedShaderSourceANGLE:
+ return "glGetTranslatedShaderSourceANGLE";
+ case EntryPoint::GLGetUniformBlockIndex:
+ return "glGetUniformBlockIndex";
+ case EntryPoint::GLGetUniformIndices:
+ return "glGetUniformIndices";
+ case EntryPoint::GLGetUniformLocation:
+ return "glGetUniformLocation";
+ case EntryPoint::GLGetUniformSubroutineuiv:
+ return "glGetUniformSubroutineuiv";
+ case EntryPoint::GLGetUniformdv:
+ return "glGetUniformdv";
+ case EntryPoint::GLGetUniformfv:
+ return "glGetUniformfv";
+ case EntryPoint::GLGetUniformfvRobustANGLE:
+ return "glGetUniformfvRobustANGLE";
+ case EntryPoint::GLGetUniformiv:
+ return "glGetUniformiv";
+ case EntryPoint::GLGetUniformivRobustANGLE:
+ return "glGetUniformivRobustANGLE";
+ case EntryPoint::GLGetUniformuiv:
+ return "glGetUniformuiv";
+ case EntryPoint::GLGetUniformuivRobustANGLE:
+ return "glGetUniformuivRobustANGLE";
+ case EntryPoint::GLGetUnsignedBytei_vEXT:
+ return "glGetUnsignedBytei_vEXT";
+ case EntryPoint::GLGetUnsignedBytevEXT:
+ return "glGetUnsignedBytevEXT";
+ case EntryPoint::GLGetVertexArrayIndexed64iv:
+ return "glGetVertexArrayIndexed64iv";
+ case EntryPoint::GLGetVertexArrayIndexediv:
+ return "glGetVertexArrayIndexediv";
+ case EntryPoint::GLGetVertexArrayiv:
+ return "glGetVertexArrayiv";
+ case EntryPoint::GLGetVertexAttribIiv:
+ return "glGetVertexAttribIiv";
+ case EntryPoint::GLGetVertexAttribIivRobustANGLE:
+ return "glGetVertexAttribIivRobustANGLE";
+ case EntryPoint::GLGetVertexAttribIuiv:
+ return "glGetVertexAttribIuiv";
+ case EntryPoint::GLGetVertexAttribIuivRobustANGLE:
+ return "glGetVertexAttribIuivRobustANGLE";
+ case EntryPoint::GLGetVertexAttribLdv:
+ return "glGetVertexAttribLdv";
+ case EntryPoint::GLGetVertexAttribPointerv:
+ return "glGetVertexAttribPointerv";
+ case EntryPoint::GLGetVertexAttribPointervRobustANGLE:
+ return "glGetVertexAttribPointervRobustANGLE";
+ case EntryPoint::GLGetVertexAttribdv:
+ return "glGetVertexAttribdv";
+ case EntryPoint::GLGetVertexAttribfv:
+ return "glGetVertexAttribfv";
+ case EntryPoint::GLGetVertexAttribfvRobustANGLE:
+ return "glGetVertexAttribfvRobustANGLE";
+ case EntryPoint::GLGetVertexAttribiv:
+ return "glGetVertexAttribiv";
+ case EntryPoint::GLGetVertexAttribivRobustANGLE:
+ return "glGetVertexAttribivRobustANGLE";
+ case EntryPoint::GLGetnColorTable:
+ return "glGetnColorTable";
+ case EntryPoint::GLGetnCompressedTexImage:
+ return "glGetnCompressedTexImage";
+ case EntryPoint::GLGetnConvolutionFilter:
+ return "glGetnConvolutionFilter";
+ case EntryPoint::GLGetnHistogram:
+ return "glGetnHistogram";
+ case EntryPoint::GLGetnMapdv:
+ return "glGetnMapdv";
+ case EntryPoint::GLGetnMapfv:
+ return "glGetnMapfv";
+ case EntryPoint::GLGetnMapiv:
+ return "glGetnMapiv";
+ case EntryPoint::GLGetnMinmax:
+ return "glGetnMinmax";
+ case EntryPoint::GLGetnPixelMapfv:
+ return "glGetnPixelMapfv";
+ case EntryPoint::GLGetnPixelMapuiv:
+ return "glGetnPixelMapuiv";
+ case EntryPoint::GLGetnPixelMapusv:
+ return "glGetnPixelMapusv";
+ case EntryPoint::GLGetnPolygonStipple:
+ return "glGetnPolygonStipple";
+ case EntryPoint::GLGetnSeparableFilter:
+ return "glGetnSeparableFilter";
+ case EntryPoint::GLGetnTexImage:
+ return "glGetnTexImage";
+ case EntryPoint::GLGetnUniformdv:
+ return "glGetnUniformdv";
+ case EntryPoint::GLGetnUniformfv:
+ return "glGetnUniformfv";
+ case EntryPoint::GLGetnUniformfvEXT:
+ return "glGetnUniformfvEXT";
+ case EntryPoint::GLGetnUniformfvRobustANGLE:
+ return "glGetnUniformfvRobustANGLE";
+ case EntryPoint::GLGetnUniformiv:
+ return "glGetnUniformiv";
+ case EntryPoint::GLGetnUniformivEXT:
+ return "glGetnUniformivEXT";
+ case EntryPoint::GLGetnUniformivRobustANGLE:
+ return "glGetnUniformivRobustANGLE";
+ case EntryPoint::GLGetnUniformuiv:
+ return "glGetnUniformuiv";
+ case EntryPoint::GLGetnUniformuivRobustANGLE:
+ return "glGetnUniformuivRobustANGLE";
+ case EntryPoint::GLHint:
+ return "glHint";
+ case EntryPoint::GLImportMemoryFdEXT:
+ return "glImportMemoryFdEXT";
+ case EntryPoint::GLImportMemoryZirconHandleANGLE:
+ return "glImportMemoryZirconHandleANGLE";
+ case EntryPoint::GLImportSemaphoreFdEXT:
+ return "glImportSemaphoreFdEXT";
+ case EntryPoint::GLImportSemaphoreZirconHandleANGLE:
+ return "glImportSemaphoreZirconHandleANGLE";
+ case EntryPoint::GLIndexMask:
+ return "glIndexMask";
+ case EntryPoint::GLIndexPointer:
+ return "glIndexPointer";
+ case EntryPoint::GLIndexd:
+ return "glIndexd";
+ case EntryPoint::GLIndexdv:
+ return "glIndexdv";
+ case EntryPoint::GLIndexf:
+ return "glIndexf";
+ case EntryPoint::GLIndexfv:
+ return "glIndexfv";
+ case EntryPoint::GLIndexi:
+ return "glIndexi";
+ case EntryPoint::GLIndexiv:
+ return "glIndexiv";
+ case EntryPoint::GLIndexs:
+ return "glIndexs";
+ case EntryPoint::GLIndexsv:
+ return "glIndexsv";
+ case EntryPoint::GLIndexub:
+ return "glIndexub";
+ case EntryPoint::GLIndexubv:
+ return "glIndexubv";
+ case EntryPoint::GLInitNames:
+ return "glInitNames";
+ case EntryPoint::GLInsertEventMarkerEXT:
+ return "glInsertEventMarkerEXT";
+ case EntryPoint::GLInterleavedArrays:
+ return "glInterleavedArrays";
+ case EntryPoint::GLInvalid:
+ return "glInvalid";
+ case EntryPoint::GLInvalidateBufferData:
+ return "glInvalidateBufferData";
+ case EntryPoint::GLInvalidateBufferSubData:
+ return "glInvalidateBufferSubData";
+ case EntryPoint::GLInvalidateFramebuffer:
+ return "glInvalidateFramebuffer";
+ case EntryPoint::GLInvalidateNamedFramebufferData:
+ return "glInvalidateNamedFramebufferData";
+ case EntryPoint::GLInvalidateNamedFramebufferSubData:
+ return "glInvalidateNamedFramebufferSubData";
+ case EntryPoint::GLInvalidateSubFramebuffer:
+ return "glInvalidateSubFramebuffer";
+ case EntryPoint::GLInvalidateTexImage:
+ return "glInvalidateTexImage";
+ case EntryPoint::GLInvalidateTexSubImage:
+ return "glInvalidateTexSubImage";
+ case EntryPoint::GLInvalidateTextureANGLE:
+ return "glInvalidateTextureANGLE";
+ case EntryPoint::GLIsBuffer:
+ return "glIsBuffer";
+ case EntryPoint::GLIsEnabled:
+ return "glIsEnabled";
+ case EntryPoint::GLIsEnabledi:
+ return "glIsEnabledi";
+ case EntryPoint::GLIsEnablediEXT:
+ return "glIsEnablediEXT";
+ case EntryPoint::GLIsEnablediOES:
+ return "glIsEnablediOES";
+ case EntryPoint::GLIsFenceNV:
+ return "glIsFenceNV";
+ case EntryPoint::GLIsFramebuffer:
+ return "glIsFramebuffer";
+ case EntryPoint::GLIsFramebufferOES:
+ return "glIsFramebufferOES";
+ case EntryPoint::GLIsList:
+ return "glIsList";
+ case EntryPoint::GLIsMemoryObjectEXT:
+ return "glIsMemoryObjectEXT";
+ case EntryPoint::GLIsProgram:
+ return "glIsProgram";
+ case EntryPoint::GLIsProgramPipeline:
+ return "glIsProgramPipeline";
+ case EntryPoint::GLIsProgramPipelineEXT:
+ return "glIsProgramPipelineEXT";
+ case EntryPoint::GLIsQuery:
+ return "glIsQuery";
+ case EntryPoint::GLIsQueryEXT:
+ return "glIsQueryEXT";
+ case EntryPoint::GLIsRenderbuffer:
+ return "glIsRenderbuffer";
+ case EntryPoint::GLIsRenderbufferOES:
+ return "glIsRenderbufferOES";
+ case EntryPoint::GLIsSampler:
+ return "glIsSampler";
+ case EntryPoint::GLIsSemaphoreEXT:
+ return "glIsSemaphoreEXT";
+ case EntryPoint::GLIsShader:
+ return "glIsShader";
+ case EntryPoint::GLIsSync:
+ return "glIsSync";
+ case EntryPoint::GLIsTexture:
+ return "glIsTexture";
+ case EntryPoint::GLIsTransformFeedback:
+ return "glIsTransformFeedback";
+ case EntryPoint::GLIsVertexArray:
+ return "glIsVertexArray";
+ case EntryPoint::GLIsVertexArrayOES:
+ return "glIsVertexArrayOES";
+ case EntryPoint::GLLabelObjectEXT:
+ return "glLabelObjectEXT";
+ case EntryPoint::GLLightModelf:
+ return "glLightModelf";
+ case EntryPoint::GLLightModelfv:
+ return "glLightModelfv";
+ case EntryPoint::GLLightModeli:
+ return "glLightModeli";
+ case EntryPoint::GLLightModeliv:
+ return "glLightModeliv";
+ case EntryPoint::GLLightModelx:
+ return "glLightModelx";
+ case EntryPoint::GLLightModelxv:
+ return "glLightModelxv";
+ case EntryPoint::GLLightf:
+ return "glLightf";
+ case EntryPoint::GLLightfv:
+ return "glLightfv";
+ case EntryPoint::GLLighti:
+ return "glLighti";
+ case EntryPoint::GLLightiv:
+ return "glLightiv";
+ case EntryPoint::GLLightx:
+ return "glLightx";
+ case EntryPoint::GLLightxv:
+ return "glLightxv";
+ case EntryPoint::GLLineStipple:
+ return "glLineStipple";
+ case EntryPoint::GLLineWidth:
+ return "glLineWidth";
+ case EntryPoint::GLLineWidthx:
+ return "glLineWidthx";
+ case EntryPoint::GLLinkProgram:
+ return "glLinkProgram";
+ case EntryPoint::GLListBase:
+ return "glListBase";
+ case EntryPoint::GLLoadIdentity:
+ return "glLoadIdentity";
+ case EntryPoint::GLLoadMatrixd:
+ return "glLoadMatrixd";
+ case EntryPoint::GLLoadMatrixf:
+ return "glLoadMatrixf";
+ case EntryPoint::GLLoadMatrixx:
+ return "glLoadMatrixx";
+ case EntryPoint::GLLoadName:
+ return "glLoadName";
+ case EntryPoint::GLLoadPaletteFromModelViewMatrixOES:
+ return "glLoadPaletteFromModelViewMatrixOES";
+ case EntryPoint::GLLoadTransposeMatrixd:
+ return "glLoadTransposeMatrixd";
+ case EntryPoint::GLLoadTransposeMatrixf:
+ return "glLoadTransposeMatrixf";
+ case EntryPoint::GLLogicOp:
+ return "glLogicOp";
+ case EntryPoint::GLLogicOpANGLE:
+ return "glLogicOpANGLE";
+ case EntryPoint::GLLoseContextCHROMIUM:
+ return "glLoseContextCHROMIUM";
+ case EntryPoint::GLMap1d:
+ return "glMap1d";
+ case EntryPoint::GLMap1f:
+ return "glMap1f";
+ case EntryPoint::GLMap2d:
+ return "glMap2d";
+ case EntryPoint::GLMap2f:
+ return "glMap2f";
+ case EntryPoint::GLMapBuffer:
+ return "glMapBuffer";
+ case EntryPoint::GLMapBufferOES:
+ return "glMapBufferOES";
+ case EntryPoint::GLMapBufferRange:
+ return "glMapBufferRange";
+ case EntryPoint::GLMapBufferRangeEXT:
+ return "glMapBufferRangeEXT";
+ case EntryPoint::GLMapGrid1d:
+ return "glMapGrid1d";
+ case EntryPoint::GLMapGrid1f:
+ return "glMapGrid1f";
+ case EntryPoint::GLMapGrid2d:
+ return "glMapGrid2d";
+ case EntryPoint::GLMapGrid2f:
+ return "glMapGrid2f";
+ case EntryPoint::GLMapNamedBuffer:
+ return "glMapNamedBuffer";
+ case EntryPoint::GLMapNamedBufferRange:
+ return "glMapNamedBufferRange";
+ case EntryPoint::GLMaterialf:
+ return "glMaterialf";
+ case EntryPoint::GLMaterialfv:
+ return "glMaterialfv";
+ case EntryPoint::GLMateriali:
+ return "glMateriali";
+ case EntryPoint::GLMaterialiv:
+ return "glMaterialiv";
+ case EntryPoint::GLMaterialx:
+ return "glMaterialx";
+ case EntryPoint::GLMaterialxv:
+ return "glMaterialxv";
+ case EntryPoint::GLMatrixIndexPointerOES:
+ return "glMatrixIndexPointerOES";
+ case EntryPoint::GLMatrixMode:
+ return "glMatrixMode";
+ case EntryPoint::GLMaxShaderCompilerThreadsKHR:
+ return "glMaxShaderCompilerThreadsKHR";
+ case EntryPoint::GLMemoryBarrier:
+ return "glMemoryBarrier";
+ case EntryPoint::GLMemoryBarrierByRegion:
+ return "glMemoryBarrierByRegion";
+ case EntryPoint::GLMemoryObjectParameterivEXT:
+ return "glMemoryObjectParameterivEXT";
+ case EntryPoint::GLMinSampleShading:
+ return "glMinSampleShading";
+ case EntryPoint::GLMinSampleShadingOES:
+ return "glMinSampleShadingOES";
+ case EntryPoint::GLMultMatrixd:
+ return "glMultMatrixd";
+ case EntryPoint::GLMultMatrixf:
+ return "glMultMatrixf";
+ case EntryPoint::GLMultMatrixx:
+ return "glMultMatrixx";
+ case EntryPoint::GLMultTransposeMatrixd:
+ return "glMultTransposeMatrixd";
+ case EntryPoint::GLMultTransposeMatrixf:
+ return "glMultTransposeMatrixf";
+ case EntryPoint::GLMultiDrawArrays:
+ return "glMultiDrawArrays";
+ case EntryPoint::GLMultiDrawArraysANGLE:
+ return "glMultiDrawArraysANGLE";
+ case EntryPoint::GLMultiDrawArraysIndirect:
+ return "glMultiDrawArraysIndirect";
+ case EntryPoint::GLMultiDrawArraysIndirectCount:
+ return "glMultiDrawArraysIndirectCount";
+ case EntryPoint::GLMultiDrawArraysIndirectEXT:
+ return "glMultiDrawArraysIndirectEXT";
+ case EntryPoint::GLMultiDrawArraysInstancedANGLE:
+ return "glMultiDrawArraysInstancedANGLE";
+ case EntryPoint::GLMultiDrawArraysInstancedBaseInstanceANGLE:
+ return "glMultiDrawArraysInstancedBaseInstanceANGLE";
+ case EntryPoint::GLMultiDrawElements:
+ return "glMultiDrawElements";
+ case EntryPoint::GLMultiDrawElementsANGLE:
+ return "glMultiDrawElementsANGLE";
+ case EntryPoint::GLMultiDrawElementsBaseVertex:
+ return "glMultiDrawElementsBaseVertex";
+ case EntryPoint::GLMultiDrawElementsBaseVertexEXT:
+ return "glMultiDrawElementsBaseVertexEXT";
+ case EntryPoint::GLMultiDrawElementsIndirect:
+ return "glMultiDrawElementsIndirect";
+ case EntryPoint::GLMultiDrawElementsIndirectCount:
+ return "glMultiDrawElementsIndirectCount";
+ case EntryPoint::GLMultiDrawElementsIndirectEXT:
+ return "glMultiDrawElementsIndirectEXT";
+ case EntryPoint::GLMultiDrawElementsInstancedANGLE:
+ return "glMultiDrawElementsInstancedANGLE";
+ case EntryPoint::GLMultiDrawElementsInstancedBaseVertexBaseInstanceANGLE:
+ return "glMultiDrawElementsInstancedBaseVertexBaseInstanceANGLE";
+ case EntryPoint::GLMultiTexCoord1d:
+ return "glMultiTexCoord1d";
+ case EntryPoint::GLMultiTexCoord1dv:
+ return "glMultiTexCoord1dv";
+ case EntryPoint::GLMultiTexCoord1f:
+ return "glMultiTexCoord1f";
+ case EntryPoint::GLMultiTexCoord1fv:
+ return "glMultiTexCoord1fv";
+ case EntryPoint::GLMultiTexCoord1i:
+ return "glMultiTexCoord1i";
+ case EntryPoint::GLMultiTexCoord1iv:
+ return "glMultiTexCoord1iv";
+ case EntryPoint::GLMultiTexCoord1s:
+ return "glMultiTexCoord1s";
+ case EntryPoint::GLMultiTexCoord1sv:
+ return "glMultiTexCoord1sv";
+ case EntryPoint::GLMultiTexCoord2d:
+ return "glMultiTexCoord2d";
+ case EntryPoint::GLMultiTexCoord2dv:
+ return "glMultiTexCoord2dv";
+ case EntryPoint::GLMultiTexCoord2f:
+ return "glMultiTexCoord2f";
+ case EntryPoint::GLMultiTexCoord2fv:
+ return "glMultiTexCoord2fv";
+ case EntryPoint::GLMultiTexCoord2i:
+ return "glMultiTexCoord2i";
+ case EntryPoint::GLMultiTexCoord2iv:
+ return "glMultiTexCoord2iv";
+ case EntryPoint::GLMultiTexCoord2s:
+ return "glMultiTexCoord2s";
+ case EntryPoint::GLMultiTexCoord2sv:
+ return "glMultiTexCoord2sv";
+ case EntryPoint::GLMultiTexCoord3d:
+ return "glMultiTexCoord3d";
+ case EntryPoint::GLMultiTexCoord3dv:
+ return "glMultiTexCoord3dv";
+ case EntryPoint::GLMultiTexCoord3f:
+ return "glMultiTexCoord3f";
+ case EntryPoint::GLMultiTexCoord3fv:
+ return "glMultiTexCoord3fv";
+ case EntryPoint::GLMultiTexCoord3i:
+ return "glMultiTexCoord3i";
+ case EntryPoint::GLMultiTexCoord3iv:
+ return "glMultiTexCoord3iv";
+ case EntryPoint::GLMultiTexCoord3s:
+ return "glMultiTexCoord3s";
+ case EntryPoint::GLMultiTexCoord3sv:
+ return "glMultiTexCoord3sv";
+ case EntryPoint::GLMultiTexCoord4d:
+ return "glMultiTexCoord4d";
+ case EntryPoint::GLMultiTexCoord4dv:
+ return "glMultiTexCoord4dv";
+ case EntryPoint::GLMultiTexCoord4f:
+ return "glMultiTexCoord4f";
+ case EntryPoint::GLMultiTexCoord4fv:
+ return "glMultiTexCoord4fv";
+ case EntryPoint::GLMultiTexCoord4i:
+ return "glMultiTexCoord4i";
+ case EntryPoint::GLMultiTexCoord4iv:
+ return "glMultiTexCoord4iv";
+ case EntryPoint::GLMultiTexCoord4s:
+ return "glMultiTexCoord4s";
+ case EntryPoint::GLMultiTexCoord4sv:
+ return "glMultiTexCoord4sv";
+ case EntryPoint::GLMultiTexCoord4x:
+ return "glMultiTexCoord4x";
+ case EntryPoint::GLMultiTexCoordP1ui:
+ return "glMultiTexCoordP1ui";
+ case EntryPoint::GLMultiTexCoordP1uiv:
+ return "glMultiTexCoordP1uiv";
+ case EntryPoint::GLMultiTexCoordP2ui:
+ return "glMultiTexCoordP2ui";
+ case EntryPoint::GLMultiTexCoordP2uiv:
+ return "glMultiTexCoordP2uiv";
+ case EntryPoint::GLMultiTexCoordP3ui:
+ return "glMultiTexCoordP3ui";
+ case EntryPoint::GLMultiTexCoordP3uiv:
+ return "glMultiTexCoordP3uiv";
+ case EntryPoint::GLMultiTexCoordP4ui:
+ return "glMultiTexCoordP4ui";
+ case EntryPoint::GLMultiTexCoordP4uiv:
+ return "glMultiTexCoordP4uiv";
+ case EntryPoint::GLNamedBufferData:
+ return "glNamedBufferData";
+ case EntryPoint::GLNamedBufferStorage:
+ return "glNamedBufferStorage";
+ case EntryPoint::GLNamedBufferStorageExternalEXT:
+ return "glNamedBufferStorageExternalEXT";
+ case EntryPoint::GLNamedBufferSubData:
+ return "glNamedBufferSubData";
+ case EntryPoint::GLNamedFramebufferDrawBuffer:
+ return "glNamedFramebufferDrawBuffer";
+ case EntryPoint::GLNamedFramebufferDrawBuffers:
+ return "glNamedFramebufferDrawBuffers";
+ case EntryPoint::GLNamedFramebufferParameteri:
+ return "glNamedFramebufferParameteri";
+ case EntryPoint::GLNamedFramebufferReadBuffer:
+ return "glNamedFramebufferReadBuffer";
+ case EntryPoint::GLNamedFramebufferRenderbuffer:
+ return "glNamedFramebufferRenderbuffer";
+ case EntryPoint::GLNamedFramebufferTexture:
+ return "glNamedFramebufferTexture";
+ case EntryPoint::GLNamedFramebufferTextureLayer:
+ return "glNamedFramebufferTextureLayer";
+ case EntryPoint::GLNamedRenderbufferStorage:
+ return "glNamedRenderbufferStorage";
+ case EntryPoint::GLNamedRenderbufferStorageMultisample:
+ return "glNamedRenderbufferStorageMultisample";
+ case EntryPoint::GLNewList:
+ return "glNewList";
+ case EntryPoint::GLNormal3b:
+ return "glNormal3b";
+ case EntryPoint::GLNormal3bv:
+ return "glNormal3bv";
+ case EntryPoint::GLNormal3d:
+ return "glNormal3d";
+ case EntryPoint::GLNormal3dv:
+ return "glNormal3dv";
+ case EntryPoint::GLNormal3f:
+ return "glNormal3f";
+ case EntryPoint::GLNormal3fv:
+ return "glNormal3fv";
+ case EntryPoint::GLNormal3i:
+ return "glNormal3i";
+ case EntryPoint::GLNormal3iv:
+ return "glNormal3iv";
+ case EntryPoint::GLNormal3s:
+ return "glNormal3s";
+ case EntryPoint::GLNormal3sv:
+ return "glNormal3sv";
+ case EntryPoint::GLNormal3x:
+ return "glNormal3x";
+ case EntryPoint::GLNormalP3ui:
+ return "glNormalP3ui";
+ case EntryPoint::GLNormalP3uiv:
+ return "glNormalP3uiv";
+ case EntryPoint::GLNormalPointer:
+ return "glNormalPointer";
+ case EntryPoint::GLObjectLabel:
+ return "glObjectLabel";
+ case EntryPoint::GLObjectLabelKHR:
+ return "glObjectLabelKHR";
+ case EntryPoint::GLObjectPtrLabel:
+ return "glObjectPtrLabel";
+ case EntryPoint::GLObjectPtrLabelKHR:
+ return "glObjectPtrLabelKHR";
+ case EntryPoint::GLOrtho:
+ return "glOrtho";
+ case EntryPoint::GLOrthof:
+ return "glOrthof";
+ case EntryPoint::GLOrthox:
+ return "glOrthox";
+ case EntryPoint::GLPassThrough:
+ return "glPassThrough";
+ case EntryPoint::GLPatchParameterfv:
+ return "glPatchParameterfv";
+ case EntryPoint::GLPatchParameteri:
+ return "glPatchParameteri";
+ case EntryPoint::GLPatchParameteriEXT:
+ return "glPatchParameteriEXT";
+ case EntryPoint::GLPauseTransformFeedback:
+ return "glPauseTransformFeedback";
+ case EntryPoint::GLPixelLocalStorageBarrierANGLE:
+ return "glPixelLocalStorageBarrierANGLE";
+ case EntryPoint::GLPixelMapfv:
+ return "glPixelMapfv";
+ case EntryPoint::GLPixelMapuiv:
+ return "glPixelMapuiv";
+ case EntryPoint::GLPixelMapusv:
+ return "glPixelMapusv";
+ case EntryPoint::GLPixelStoref:
+ return "glPixelStoref";
+ case EntryPoint::GLPixelStorei:
+ return "glPixelStorei";
+ case EntryPoint::GLPixelTransferf:
+ return "glPixelTransferf";
+ case EntryPoint::GLPixelTransferi:
+ return "glPixelTransferi";
+ case EntryPoint::GLPixelZoom:
+ return "glPixelZoom";
+ case EntryPoint::GLPointParameterf:
+ return "glPointParameterf";
+ case EntryPoint::GLPointParameterfv:
+ return "glPointParameterfv";
+ case EntryPoint::GLPointParameteri:
+ return "glPointParameteri";
+ case EntryPoint::GLPointParameteriv:
+ return "glPointParameteriv";
+ case EntryPoint::GLPointParameterx:
+ return "glPointParameterx";
+ case EntryPoint::GLPointParameterxv:
+ return "glPointParameterxv";
+ case EntryPoint::GLPointSize:
+ return "glPointSize";
+ case EntryPoint::GLPointSizePointerOES:
+ return "glPointSizePointerOES";
+ case EntryPoint::GLPointSizex:
+ return "glPointSizex";
+ case EntryPoint::GLPolygonMode:
+ return "glPolygonMode";
+ case EntryPoint::GLPolygonOffset:
+ return "glPolygonOffset";
+ case EntryPoint::GLPolygonOffsetClamp:
+ return "glPolygonOffsetClamp";
+ case EntryPoint::GLPolygonOffsetx:
+ return "glPolygonOffsetx";
+ case EntryPoint::GLPolygonStipple:
+ return "glPolygonStipple";
+ case EntryPoint::GLPopAttrib:
+ return "glPopAttrib";
+ case EntryPoint::GLPopClientAttrib:
+ return "glPopClientAttrib";
+ case EntryPoint::GLPopDebugGroup:
+ return "glPopDebugGroup";
+ case EntryPoint::GLPopDebugGroupKHR:
+ return "glPopDebugGroupKHR";
+ case EntryPoint::GLPopGroupMarkerEXT:
+ return "glPopGroupMarkerEXT";
+ case EntryPoint::GLPopMatrix:
+ return "glPopMatrix";
+ case EntryPoint::GLPopName:
+ return "glPopName";
+ case EntryPoint::GLPrimitiveBoundingBox:
+ return "glPrimitiveBoundingBox";
+ case EntryPoint::GLPrimitiveBoundingBoxEXT:
+ return "glPrimitiveBoundingBoxEXT";
+ case EntryPoint::GLPrimitiveBoundingBoxOES:
+ return "glPrimitiveBoundingBoxOES";
+ case EntryPoint::GLPrimitiveRestartIndex:
+ return "glPrimitiveRestartIndex";
+ case EntryPoint::GLPrioritizeTextures:
+ return "glPrioritizeTextures";
+ case EntryPoint::GLProgramBinary:
+ return "glProgramBinary";
+ case EntryPoint::GLProgramBinaryOES:
+ return "glProgramBinaryOES";
+ case EntryPoint::GLProgramParameteri:
+ return "glProgramParameteri";
+ case EntryPoint::GLProgramParameteriEXT:
+ return "glProgramParameteriEXT";
+ case EntryPoint::GLProgramUniform1d:
+ return "glProgramUniform1d";
+ case EntryPoint::GLProgramUniform1dv:
+ return "glProgramUniform1dv";
+ case EntryPoint::GLProgramUniform1f:
+ return "glProgramUniform1f";
+ case EntryPoint::GLProgramUniform1fEXT:
+ return "glProgramUniform1fEXT";
+ case EntryPoint::GLProgramUniform1fv:
+ return "glProgramUniform1fv";
+ case EntryPoint::GLProgramUniform1fvEXT:
+ return "glProgramUniform1fvEXT";
+ case EntryPoint::GLProgramUniform1i:
+ return "glProgramUniform1i";
+ case EntryPoint::GLProgramUniform1iEXT:
+ return "glProgramUniform1iEXT";
+ case EntryPoint::GLProgramUniform1iv:
+ return "glProgramUniform1iv";
+ case EntryPoint::GLProgramUniform1ivEXT:
+ return "glProgramUniform1ivEXT";
+ case EntryPoint::GLProgramUniform1ui:
+ return "glProgramUniform1ui";
+ case EntryPoint::GLProgramUniform1uiEXT:
+ return "glProgramUniform1uiEXT";
+ case EntryPoint::GLProgramUniform1uiv:
+ return "glProgramUniform1uiv";
+ case EntryPoint::GLProgramUniform1uivEXT:
+ return "glProgramUniform1uivEXT";
+ case EntryPoint::GLProgramUniform2d:
+ return "glProgramUniform2d";
+ case EntryPoint::GLProgramUniform2dv:
+ return "glProgramUniform2dv";
+ case EntryPoint::GLProgramUniform2f:
+ return "glProgramUniform2f";
+ case EntryPoint::GLProgramUniform2fEXT:
+ return "glProgramUniform2fEXT";
+ case EntryPoint::GLProgramUniform2fv:
+ return "glProgramUniform2fv";
+ case EntryPoint::GLProgramUniform2fvEXT:
+ return "glProgramUniform2fvEXT";
+ case EntryPoint::GLProgramUniform2i:
+ return "glProgramUniform2i";
+ case EntryPoint::GLProgramUniform2iEXT:
+ return "glProgramUniform2iEXT";
+ case EntryPoint::GLProgramUniform2iv:
+ return "glProgramUniform2iv";
+ case EntryPoint::GLProgramUniform2ivEXT:
+ return "glProgramUniform2ivEXT";
+ case EntryPoint::GLProgramUniform2ui:
+ return "glProgramUniform2ui";
+ case EntryPoint::GLProgramUniform2uiEXT:
+ return "glProgramUniform2uiEXT";
+ case EntryPoint::GLProgramUniform2uiv:
+ return "glProgramUniform2uiv";
+ case EntryPoint::GLProgramUniform2uivEXT:
+ return "glProgramUniform2uivEXT";
+ case EntryPoint::GLProgramUniform3d:
+ return "glProgramUniform3d";
+ case EntryPoint::GLProgramUniform3dv:
+ return "glProgramUniform3dv";
+ case EntryPoint::GLProgramUniform3f:
+ return "glProgramUniform3f";
+ case EntryPoint::GLProgramUniform3fEXT:
+ return "glProgramUniform3fEXT";
+ case EntryPoint::GLProgramUniform3fv:
+ return "glProgramUniform3fv";
+ case EntryPoint::GLProgramUniform3fvEXT:
+ return "glProgramUniform3fvEXT";
+ case EntryPoint::GLProgramUniform3i:
+ return "glProgramUniform3i";
+ case EntryPoint::GLProgramUniform3iEXT:
+ return "glProgramUniform3iEXT";
+ case EntryPoint::GLProgramUniform3iv:
+ return "glProgramUniform3iv";
+ case EntryPoint::GLProgramUniform3ivEXT:
+ return "glProgramUniform3ivEXT";
+ case EntryPoint::GLProgramUniform3ui:
+ return "glProgramUniform3ui";
+ case EntryPoint::GLProgramUniform3uiEXT:
+ return "glProgramUniform3uiEXT";
+ case EntryPoint::GLProgramUniform3uiv:
+ return "glProgramUniform3uiv";
+ case EntryPoint::GLProgramUniform3uivEXT:
+ return "glProgramUniform3uivEXT";
+ case EntryPoint::GLProgramUniform4d:
+ return "glProgramUniform4d";
+ case EntryPoint::GLProgramUniform4dv:
+ return "glProgramUniform4dv";
+ case EntryPoint::GLProgramUniform4f:
+ return "glProgramUniform4f";
+ case EntryPoint::GLProgramUniform4fEXT:
+ return "glProgramUniform4fEXT";
+ case EntryPoint::GLProgramUniform4fv:
+ return "glProgramUniform4fv";
+ case EntryPoint::GLProgramUniform4fvEXT:
+ return "glProgramUniform4fvEXT";
+ case EntryPoint::GLProgramUniform4i:
+ return "glProgramUniform4i";
+ case EntryPoint::GLProgramUniform4iEXT:
+ return "glProgramUniform4iEXT";
+ case EntryPoint::GLProgramUniform4iv:
+ return "glProgramUniform4iv";
+ case EntryPoint::GLProgramUniform4ivEXT:
+ return "glProgramUniform4ivEXT";
+ case EntryPoint::GLProgramUniform4ui:
+ return "glProgramUniform4ui";
+ case EntryPoint::GLProgramUniform4uiEXT:
+ return "glProgramUniform4uiEXT";
+ case EntryPoint::GLProgramUniform4uiv:
+ return "glProgramUniform4uiv";
+ case EntryPoint::GLProgramUniform4uivEXT:
+ return "glProgramUniform4uivEXT";
+ case EntryPoint::GLProgramUniformMatrix2dv:
+ return "glProgramUniformMatrix2dv";
+ case EntryPoint::GLProgramUniformMatrix2fv:
+ return "glProgramUniformMatrix2fv";
+ case EntryPoint::GLProgramUniformMatrix2fvEXT:
+ return "glProgramUniformMatrix2fvEXT";
+ case EntryPoint::GLProgramUniformMatrix2x3dv:
+ return "glProgramUniformMatrix2x3dv";
+ case EntryPoint::GLProgramUniformMatrix2x3fv:
+ return "glProgramUniformMatrix2x3fv";
+ case EntryPoint::GLProgramUniformMatrix2x3fvEXT:
+ return "glProgramUniformMatrix2x3fvEXT";
+ case EntryPoint::GLProgramUniformMatrix2x4dv:
+ return "glProgramUniformMatrix2x4dv";
+ case EntryPoint::GLProgramUniformMatrix2x4fv:
+ return "glProgramUniformMatrix2x4fv";
+ case EntryPoint::GLProgramUniformMatrix2x4fvEXT:
+ return "glProgramUniformMatrix2x4fvEXT";
+ case EntryPoint::GLProgramUniformMatrix3dv:
+ return "glProgramUniformMatrix3dv";
+ case EntryPoint::GLProgramUniformMatrix3fv:
+ return "glProgramUniformMatrix3fv";
+ case EntryPoint::GLProgramUniformMatrix3fvEXT:
+ return "glProgramUniformMatrix3fvEXT";
+ case EntryPoint::GLProgramUniformMatrix3x2dv:
+ return "glProgramUniformMatrix3x2dv";
+ case EntryPoint::GLProgramUniformMatrix3x2fv:
+ return "glProgramUniformMatrix3x2fv";
+ case EntryPoint::GLProgramUniformMatrix3x2fvEXT:
+ return "glProgramUniformMatrix3x2fvEXT";
+ case EntryPoint::GLProgramUniformMatrix3x4dv:
+ return "glProgramUniformMatrix3x4dv";
+ case EntryPoint::GLProgramUniformMatrix3x4fv:
+ return "glProgramUniformMatrix3x4fv";
+ case EntryPoint::GLProgramUniformMatrix3x4fvEXT:
+ return "glProgramUniformMatrix3x4fvEXT";
+ case EntryPoint::GLProgramUniformMatrix4dv:
+ return "glProgramUniformMatrix4dv";
+ case EntryPoint::GLProgramUniformMatrix4fv:
+ return "glProgramUniformMatrix4fv";
+ case EntryPoint::GLProgramUniformMatrix4fvEXT:
+ return "glProgramUniformMatrix4fvEXT";
+ case EntryPoint::GLProgramUniformMatrix4x2dv:
+ return "glProgramUniformMatrix4x2dv";
+ case EntryPoint::GLProgramUniformMatrix4x2fv:
+ return "glProgramUniformMatrix4x2fv";
+ case EntryPoint::GLProgramUniformMatrix4x2fvEXT:
+ return "glProgramUniformMatrix4x2fvEXT";
+ case EntryPoint::GLProgramUniformMatrix4x3dv:
+ return "glProgramUniformMatrix4x3dv";
+ case EntryPoint::GLProgramUniformMatrix4x3fv:
+ return "glProgramUniformMatrix4x3fv";
+ case EntryPoint::GLProgramUniformMatrix4x3fvEXT:
+ return "glProgramUniformMatrix4x3fvEXT";
+ case EntryPoint::GLProvokingVertex:
+ return "glProvokingVertex";
+ case EntryPoint::GLProvokingVertexANGLE:
+ return "glProvokingVertexANGLE";
+ case EntryPoint::GLPushAttrib:
+ return "glPushAttrib";
+ case EntryPoint::GLPushClientAttrib:
+ return "glPushClientAttrib";
+ case EntryPoint::GLPushDebugGroup:
+ return "glPushDebugGroup";
+ case EntryPoint::GLPushDebugGroupKHR:
+ return "glPushDebugGroupKHR";
+ case EntryPoint::GLPushGroupMarkerEXT:
+ return "glPushGroupMarkerEXT";
+ case EntryPoint::GLPushMatrix:
+ return "glPushMatrix";
+ case EntryPoint::GLPushName:
+ return "glPushName";
+ case EntryPoint::GLQueryCounter:
+ return "glQueryCounter";
+ case EntryPoint::GLQueryCounterEXT:
+ return "glQueryCounterEXT";
+ case EntryPoint::GLQueryMatrixxOES:
+ return "glQueryMatrixxOES";
+ case EntryPoint::GLRasterPos2d:
+ return "glRasterPos2d";
+ case EntryPoint::GLRasterPos2dv:
+ return "glRasterPos2dv";
+ case EntryPoint::GLRasterPos2f:
+ return "glRasterPos2f";
+ case EntryPoint::GLRasterPos2fv:
+ return "glRasterPos2fv";
+ case EntryPoint::GLRasterPos2i:
+ return "glRasterPos2i";
+ case EntryPoint::GLRasterPos2iv:
+ return "glRasterPos2iv";
+ case EntryPoint::GLRasterPos2s:
+ return "glRasterPos2s";
+ case EntryPoint::GLRasterPos2sv:
+ return "glRasterPos2sv";
+ case EntryPoint::GLRasterPos3d:
+ return "glRasterPos3d";
+ case EntryPoint::GLRasterPos3dv:
+ return "glRasterPos3dv";
+ case EntryPoint::GLRasterPos3f:
+ return "glRasterPos3f";
+ case EntryPoint::GLRasterPos3fv:
+ return "glRasterPos3fv";
+ case EntryPoint::GLRasterPos3i:
+ return "glRasterPos3i";
+ case EntryPoint::GLRasterPos3iv:
+ return "glRasterPos3iv";
+ case EntryPoint::GLRasterPos3s:
+ return "glRasterPos3s";
+ case EntryPoint::GLRasterPos3sv:
+ return "glRasterPos3sv";
+ case EntryPoint::GLRasterPos4d:
+ return "glRasterPos4d";
+ case EntryPoint::GLRasterPos4dv:
+ return "glRasterPos4dv";
+ case EntryPoint::GLRasterPos4f:
+ return "glRasterPos4f";
+ case EntryPoint::GLRasterPos4fv:
+ return "glRasterPos4fv";
+ case EntryPoint::GLRasterPos4i:
+ return "glRasterPos4i";
+ case EntryPoint::GLRasterPos4iv:
+ return "glRasterPos4iv";
+ case EntryPoint::GLRasterPos4s:
+ return "glRasterPos4s";
+ case EntryPoint::GLRasterPos4sv:
+ return "glRasterPos4sv";
+ case EntryPoint::GLReadBuffer:
+ return "glReadBuffer";
+ case EntryPoint::GLReadPixels:
+ return "glReadPixels";
+ case EntryPoint::GLReadPixelsRobustANGLE:
+ return "glReadPixelsRobustANGLE";
+ case EntryPoint::GLReadnPixels:
+ return "glReadnPixels";
+ case EntryPoint::GLReadnPixelsEXT:
+ return "glReadnPixelsEXT";
+ case EntryPoint::GLReadnPixelsRobustANGLE:
+ return "glReadnPixelsRobustANGLE";
+ case EntryPoint::GLRectd:
+ return "glRectd";
+ case EntryPoint::GLRectdv:
+ return "glRectdv";
+ case EntryPoint::GLRectf:
+ return "glRectf";
+ case EntryPoint::GLRectfv:
+ return "glRectfv";
+ case EntryPoint::GLRecti:
+ return "glRecti";
+ case EntryPoint::GLRectiv:
+ return "glRectiv";
+ case EntryPoint::GLRects:
+ return "glRects";
+ case EntryPoint::GLRectsv:
+ return "glRectsv";
+ case EntryPoint::GLReleaseShaderCompiler:
+ return "glReleaseShaderCompiler";
+ case EntryPoint::GLReleaseTexturesANGLE:
+ return "glReleaseTexturesANGLE";
+ case EntryPoint::GLRenderMode:
+ return "glRenderMode";
+ case EntryPoint::GLRenderbufferStorage:
+ return "glRenderbufferStorage";
+ case EntryPoint::GLRenderbufferStorageMultisample:
+ return "glRenderbufferStorageMultisample";
+ case EntryPoint::GLRenderbufferStorageMultisampleANGLE:
+ return "glRenderbufferStorageMultisampleANGLE";
+ case EntryPoint::GLRenderbufferStorageMultisampleEXT:
+ return "glRenderbufferStorageMultisampleEXT";
+ case EntryPoint::GLRenderbufferStorageOES:
+ return "glRenderbufferStorageOES";
+ case EntryPoint::GLRequestExtensionANGLE:
+ return "glRequestExtensionANGLE";
+ case EntryPoint::GLResumeTransformFeedback:
+ return "glResumeTransformFeedback";
+ case EntryPoint::GLRotated:
+ return "glRotated";
+ case EntryPoint::GLRotatef:
+ return "glRotatef";
+ case EntryPoint::GLRotatex:
+ return "glRotatex";
+ case EntryPoint::GLSampleCoverage:
+ return "glSampleCoverage";
+ case EntryPoint::GLSampleCoveragex:
+ return "glSampleCoveragex";
+ case EntryPoint::GLSampleMaski:
+ return "glSampleMaski";
+ case EntryPoint::GLSampleMaskiANGLE:
+ return "glSampleMaskiANGLE";
+ case EntryPoint::GLSamplerParameterIiv:
+ return "glSamplerParameterIiv";
+ case EntryPoint::GLSamplerParameterIivEXT:
+ return "glSamplerParameterIivEXT";
+ case EntryPoint::GLSamplerParameterIivOES:
+ return "glSamplerParameterIivOES";
+ case EntryPoint::GLSamplerParameterIivRobustANGLE:
+ return "glSamplerParameterIivRobustANGLE";
+ case EntryPoint::GLSamplerParameterIuiv:
+ return "glSamplerParameterIuiv";
+ case EntryPoint::GLSamplerParameterIuivEXT:
+ return "glSamplerParameterIuivEXT";
+ case EntryPoint::GLSamplerParameterIuivOES:
+ return "glSamplerParameterIuivOES";
+ case EntryPoint::GLSamplerParameterIuivRobustANGLE:
+ return "glSamplerParameterIuivRobustANGLE";
+ case EntryPoint::GLSamplerParameterf:
+ return "glSamplerParameterf";
+ case EntryPoint::GLSamplerParameterfv:
+ return "glSamplerParameterfv";
+ case EntryPoint::GLSamplerParameterfvRobustANGLE:
+ return "glSamplerParameterfvRobustANGLE";
+ case EntryPoint::GLSamplerParameteri:
+ return "glSamplerParameteri";
+ case EntryPoint::GLSamplerParameteriv:
+ return "glSamplerParameteriv";
+ case EntryPoint::GLSamplerParameterivRobustANGLE:
+ return "glSamplerParameterivRobustANGLE";
+ case EntryPoint::GLScaled:
+ return "glScaled";
+ case EntryPoint::GLScalef:
+ return "glScalef";
+ case EntryPoint::GLScalex:
+ return "glScalex";
+ case EntryPoint::GLScissor:
+ return "glScissor";
+ case EntryPoint::GLScissorArrayv:
+ return "glScissorArrayv";
+ case EntryPoint::GLScissorIndexed:
+ return "glScissorIndexed";
+ case EntryPoint::GLScissorIndexedv:
+ return "glScissorIndexedv";
+ case EntryPoint::GLSecondaryColor3b:
+ return "glSecondaryColor3b";
+ case EntryPoint::GLSecondaryColor3bv:
+ return "glSecondaryColor3bv";
+ case EntryPoint::GLSecondaryColor3d:
+ return "glSecondaryColor3d";
+ case EntryPoint::GLSecondaryColor3dv:
+ return "glSecondaryColor3dv";
+ case EntryPoint::GLSecondaryColor3f:
+ return "glSecondaryColor3f";
+ case EntryPoint::GLSecondaryColor3fv:
+ return "glSecondaryColor3fv";
+ case EntryPoint::GLSecondaryColor3i:
+ return "glSecondaryColor3i";
+ case EntryPoint::GLSecondaryColor3iv:
+ return "glSecondaryColor3iv";
+ case EntryPoint::GLSecondaryColor3s:
+ return "glSecondaryColor3s";
+ case EntryPoint::GLSecondaryColor3sv:
+ return "glSecondaryColor3sv";
+ case EntryPoint::GLSecondaryColor3ub:
+ return "glSecondaryColor3ub";
+ case EntryPoint::GLSecondaryColor3ubv:
+ return "glSecondaryColor3ubv";
+ case EntryPoint::GLSecondaryColor3ui:
+ return "glSecondaryColor3ui";
+ case EntryPoint::GLSecondaryColor3uiv:
+ return "glSecondaryColor3uiv";
+ case EntryPoint::GLSecondaryColor3us:
+ return "glSecondaryColor3us";
+ case EntryPoint::GLSecondaryColor3usv:
+ return "glSecondaryColor3usv";
+ case EntryPoint::GLSecondaryColorP3ui:
+ return "glSecondaryColorP3ui";
+ case EntryPoint::GLSecondaryColorP3uiv:
+ return "glSecondaryColorP3uiv";
+ case EntryPoint::GLSecondaryColorPointer:
+ return "glSecondaryColorPointer";
+ case EntryPoint::GLSelectBuffer:
+ return "glSelectBuffer";
+ case EntryPoint::GLSelectPerfMonitorCountersAMD:
+ return "glSelectPerfMonitorCountersAMD";
+ case EntryPoint::GLSemaphoreParameterui64vEXT:
+ return "glSemaphoreParameterui64vEXT";
+ case EntryPoint::GLSetFenceNV:
+ return "glSetFenceNV";
+ case EntryPoint::GLShadeModel:
+ return "glShadeModel";
+ case EntryPoint::GLShaderBinary:
+ return "glShaderBinary";
+ case EntryPoint::GLShaderSource:
+ return "glShaderSource";
+ case EntryPoint::GLShaderStorageBlockBinding:
+ return "glShaderStorageBlockBinding";
+ case EntryPoint::GLShadingRateQCOM:
+ return "glShadingRateQCOM";
+ case EntryPoint::GLSignalSemaphoreEXT:
+ return "glSignalSemaphoreEXT";
+ case EntryPoint::GLSpecializeShader:
+ return "glSpecializeShader";
+ case EntryPoint::GLStencilFunc:
+ return "glStencilFunc";
+ case EntryPoint::GLStencilFuncSeparate:
+ return "glStencilFuncSeparate";
+ case EntryPoint::GLStencilMask:
+ return "glStencilMask";
+ case EntryPoint::GLStencilMaskSeparate:
+ return "glStencilMaskSeparate";
+ case EntryPoint::GLStencilOp:
+ return "glStencilOp";
+ case EntryPoint::GLStencilOpSeparate:
+ return "glStencilOpSeparate";
+ case EntryPoint::GLTestFenceNV:
+ return "glTestFenceNV";
+ case EntryPoint::GLTexBuffer:
+ return "glTexBuffer";
+ case EntryPoint::GLTexBufferEXT:
+ return "glTexBufferEXT";
+ case EntryPoint::GLTexBufferOES:
+ return "glTexBufferOES";
+ case EntryPoint::GLTexBufferRange:
+ return "glTexBufferRange";
+ case EntryPoint::GLTexBufferRangeEXT:
+ return "glTexBufferRangeEXT";
+ case EntryPoint::GLTexBufferRangeOES:
+ return "glTexBufferRangeOES";
+ case EntryPoint::GLTexCoord1d:
+ return "glTexCoord1d";
+ case EntryPoint::GLTexCoord1dv:
+ return "glTexCoord1dv";
+ case EntryPoint::GLTexCoord1f:
+ return "glTexCoord1f";
+ case EntryPoint::GLTexCoord1fv:
+ return "glTexCoord1fv";
+ case EntryPoint::GLTexCoord1i:
+ return "glTexCoord1i";
+ case EntryPoint::GLTexCoord1iv:
+ return "glTexCoord1iv";
+ case EntryPoint::GLTexCoord1s:
+ return "glTexCoord1s";
+ case EntryPoint::GLTexCoord1sv:
+ return "glTexCoord1sv";
+ case EntryPoint::GLTexCoord2d:
+ return "glTexCoord2d";
+ case EntryPoint::GLTexCoord2dv:
+ return "glTexCoord2dv";
+ case EntryPoint::GLTexCoord2f:
+ return "glTexCoord2f";
+ case EntryPoint::GLTexCoord2fv:
+ return "glTexCoord2fv";
+ case EntryPoint::GLTexCoord2i:
+ return "glTexCoord2i";
+ case EntryPoint::GLTexCoord2iv:
+ return "glTexCoord2iv";
+ case EntryPoint::GLTexCoord2s:
+ return "glTexCoord2s";
+ case EntryPoint::GLTexCoord2sv:
+ return "glTexCoord2sv";
+ case EntryPoint::GLTexCoord3d:
+ return "glTexCoord3d";
+ case EntryPoint::GLTexCoord3dv:
+ return "glTexCoord3dv";
+ case EntryPoint::GLTexCoord3f:
+ return "glTexCoord3f";
+ case EntryPoint::GLTexCoord3fv:
+ return "glTexCoord3fv";
+ case EntryPoint::GLTexCoord3i:
+ return "glTexCoord3i";
+ case EntryPoint::GLTexCoord3iv:
+ return "glTexCoord3iv";
+ case EntryPoint::GLTexCoord3s:
+ return "glTexCoord3s";
+ case EntryPoint::GLTexCoord3sv:
+ return "glTexCoord3sv";
+ case EntryPoint::GLTexCoord4d:
+ return "glTexCoord4d";
+ case EntryPoint::GLTexCoord4dv:
+ return "glTexCoord4dv";
+ case EntryPoint::GLTexCoord4f:
+ return "glTexCoord4f";
+ case EntryPoint::GLTexCoord4fv:
+ return "glTexCoord4fv";
+ case EntryPoint::GLTexCoord4i:
+ return "glTexCoord4i";
+ case EntryPoint::GLTexCoord4iv:
+ return "glTexCoord4iv";
+ case EntryPoint::GLTexCoord4s:
+ return "glTexCoord4s";
+ case EntryPoint::GLTexCoord4sv:
+ return "glTexCoord4sv";
+ case EntryPoint::GLTexCoordP1ui:
+ return "glTexCoordP1ui";
+ case EntryPoint::GLTexCoordP1uiv:
+ return "glTexCoordP1uiv";
+ case EntryPoint::GLTexCoordP2ui:
+ return "glTexCoordP2ui";
+ case EntryPoint::GLTexCoordP2uiv:
+ return "glTexCoordP2uiv";
+ case EntryPoint::GLTexCoordP3ui:
+ return "glTexCoordP3ui";
+ case EntryPoint::GLTexCoordP3uiv:
+ return "glTexCoordP3uiv";
+ case EntryPoint::GLTexCoordP4ui:
+ return "glTexCoordP4ui";
+ case EntryPoint::GLTexCoordP4uiv:
+ return "glTexCoordP4uiv";
+ case EntryPoint::GLTexCoordPointer:
+ return "glTexCoordPointer";
+ case EntryPoint::GLTexEnvf:
+ return "glTexEnvf";
+ case EntryPoint::GLTexEnvfv:
+ return "glTexEnvfv";
+ case EntryPoint::GLTexEnvi:
+ return "glTexEnvi";
+ case EntryPoint::GLTexEnviv:
+ return "glTexEnviv";
+ case EntryPoint::GLTexEnvx:
+ return "glTexEnvx";
+ case EntryPoint::GLTexEnvxv:
+ return "glTexEnvxv";
+ case EntryPoint::GLTexGend:
+ return "glTexGend";
+ case EntryPoint::GLTexGendv:
+ return "glTexGendv";
+ case EntryPoint::GLTexGenf:
+ return "glTexGenf";
+ case EntryPoint::GLTexGenfOES:
+ return "glTexGenfOES";
+ case EntryPoint::GLTexGenfv:
+ return "glTexGenfv";
+ case EntryPoint::GLTexGenfvOES:
+ return "glTexGenfvOES";
+ case EntryPoint::GLTexGeni:
+ return "glTexGeni";
+ case EntryPoint::GLTexGeniOES:
+ return "glTexGeniOES";
+ case EntryPoint::GLTexGeniv:
+ return "glTexGeniv";
+ case EntryPoint::GLTexGenivOES:
+ return "glTexGenivOES";
+ case EntryPoint::GLTexGenxOES:
+ return "glTexGenxOES";
+ case EntryPoint::GLTexGenxvOES:
+ return "glTexGenxvOES";
+ case EntryPoint::GLTexImage1D:
+ return "glTexImage1D";
+ case EntryPoint::GLTexImage2D:
+ return "glTexImage2D";
+ case EntryPoint::GLTexImage2DExternalANGLE:
+ return "glTexImage2DExternalANGLE";
+ case EntryPoint::GLTexImage2DMultisample:
+ return "glTexImage2DMultisample";
+ case EntryPoint::GLTexImage2DRobustANGLE:
+ return "glTexImage2DRobustANGLE";
+ case EntryPoint::GLTexImage3D:
+ return "glTexImage3D";
+ case EntryPoint::GLTexImage3DMultisample:
+ return "glTexImage3DMultisample";
+ case EntryPoint::GLTexImage3DOES:
+ return "glTexImage3DOES";
+ case EntryPoint::GLTexImage3DRobustANGLE:
+ return "glTexImage3DRobustANGLE";
+ case EntryPoint::GLTexParameterIiv:
+ return "glTexParameterIiv";
+ case EntryPoint::GLTexParameterIivEXT:
+ return "glTexParameterIivEXT";
+ case EntryPoint::GLTexParameterIivOES:
+ return "glTexParameterIivOES";
+ case EntryPoint::GLTexParameterIivRobustANGLE:
+ return "glTexParameterIivRobustANGLE";
+ case EntryPoint::GLTexParameterIuiv:
+ return "glTexParameterIuiv";
+ case EntryPoint::GLTexParameterIuivEXT:
+ return "glTexParameterIuivEXT";
+ case EntryPoint::GLTexParameterIuivOES:
+ return "glTexParameterIuivOES";
+ case EntryPoint::GLTexParameterIuivRobustANGLE:
+ return "glTexParameterIuivRobustANGLE";
+ case EntryPoint::GLTexParameterf:
+ return "glTexParameterf";
+ case EntryPoint::GLTexParameterfv:
+ return "glTexParameterfv";
+ case EntryPoint::GLTexParameterfvRobustANGLE:
+ return "glTexParameterfvRobustANGLE";
+ case EntryPoint::GLTexParameteri:
+ return "glTexParameteri";
+ case EntryPoint::GLTexParameteriv:
+ return "glTexParameteriv";
+ case EntryPoint::GLTexParameterivRobustANGLE:
+ return "glTexParameterivRobustANGLE";
+ case EntryPoint::GLTexParameterx:
+ return "glTexParameterx";
+ case EntryPoint::GLTexParameterxv:
+ return "glTexParameterxv";
+ case EntryPoint::GLTexStorage1D:
+ return "glTexStorage1D";
+ case EntryPoint::GLTexStorage1DEXT:
+ return "glTexStorage1DEXT";
+ case EntryPoint::GLTexStorage2D:
+ return "glTexStorage2D";
+ case EntryPoint::GLTexStorage2DEXT:
+ return "glTexStorage2DEXT";
+ case EntryPoint::GLTexStorage2DMultisample:
+ return "glTexStorage2DMultisample";
+ case EntryPoint::GLTexStorage2DMultisampleANGLE:
+ return "glTexStorage2DMultisampleANGLE";
+ case EntryPoint::GLTexStorage3D:
+ return "glTexStorage3D";
+ case EntryPoint::GLTexStorage3DEXT:
+ return "glTexStorage3DEXT";
+ case EntryPoint::GLTexStorage3DMultisample:
+ return "glTexStorage3DMultisample";
+ case EntryPoint::GLTexStorage3DMultisampleOES:
+ return "glTexStorage3DMultisampleOES";
+ case EntryPoint::GLTexStorageMem2DEXT:
+ return "glTexStorageMem2DEXT";
+ case EntryPoint::GLTexStorageMem2DMultisampleEXT:
+ return "glTexStorageMem2DMultisampleEXT";
+ case EntryPoint::GLTexStorageMem3DEXT:
+ return "glTexStorageMem3DEXT";
+ case EntryPoint::GLTexStorageMem3DMultisampleEXT:
+ return "glTexStorageMem3DMultisampleEXT";
+ case EntryPoint::GLTexStorageMemFlags2DANGLE:
+ return "glTexStorageMemFlags2DANGLE";
+ case EntryPoint::GLTexStorageMemFlags2DMultisampleANGLE:
+ return "glTexStorageMemFlags2DMultisampleANGLE";
+ case EntryPoint::GLTexStorageMemFlags3DANGLE:
+ return "glTexStorageMemFlags3DANGLE";
+ case EntryPoint::GLTexStorageMemFlags3DMultisampleANGLE:
+ return "glTexStorageMemFlags3DMultisampleANGLE";
+ case EntryPoint::GLTexSubImage1D:
+ return "glTexSubImage1D";
+ case EntryPoint::GLTexSubImage2D:
+ return "glTexSubImage2D";
+ case EntryPoint::GLTexSubImage2DRobustANGLE:
+ return "glTexSubImage2DRobustANGLE";
+ case EntryPoint::GLTexSubImage3D:
+ return "glTexSubImage3D";
+ case EntryPoint::GLTexSubImage3DOES:
+ return "glTexSubImage3DOES";
+ case EntryPoint::GLTexSubImage3DRobustANGLE:
+ return "glTexSubImage3DRobustANGLE";
+ case EntryPoint::GLTextureBarrier:
+ return "glTextureBarrier";
+ case EntryPoint::GLTextureBuffer:
+ return "glTextureBuffer";
+ case EntryPoint::GLTextureBufferRange:
+ return "glTextureBufferRange";
+ case EntryPoint::GLTextureParameterIiv:
+ return "glTextureParameterIiv";
+ case EntryPoint::GLTextureParameterIuiv:
+ return "glTextureParameterIuiv";
+ case EntryPoint::GLTextureParameterf:
+ return "glTextureParameterf";
+ case EntryPoint::GLTextureParameterfv:
+ return "glTextureParameterfv";
+ case EntryPoint::GLTextureParameteri:
+ return "glTextureParameteri";
+ case EntryPoint::GLTextureParameteriv:
+ return "glTextureParameteriv";
+ case EntryPoint::GLTextureStorage1D:
+ return "glTextureStorage1D";
+ case EntryPoint::GLTextureStorage2D:
+ return "glTextureStorage2D";
+ case EntryPoint::GLTextureStorage2DMultisample:
+ return "glTextureStorage2DMultisample";
+ case EntryPoint::GLTextureStorage3D:
+ return "glTextureStorage3D";
+ case EntryPoint::GLTextureStorage3DMultisample:
+ return "glTextureStorage3DMultisample";
+ case EntryPoint::GLTextureSubImage1D:
+ return "glTextureSubImage1D";
+ case EntryPoint::GLTextureSubImage2D:
+ return "glTextureSubImage2D";
+ case EntryPoint::GLTextureSubImage3D:
+ return "glTextureSubImage3D";
+ case EntryPoint::GLTextureView:
+ return "glTextureView";
+ case EntryPoint::GLTransformFeedbackBufferBase:
+ return "glTransformFeedbackBufferBase";
+ case EntryPoint::GLTransformFeedbackBufferRange:
+ return "glTransformFeedbackBufferRange";
+ case EntryPoint::GLTransformFeedbackVaryings:
+ return "glTransformFeedbackVaryings";
+ case EntryPoint::GLTranslated:
+ return "glTranslated";
+ case EntryPoint::GLTranslatef:
+ return "glTranslatef";
+ case EntryPoint::GLTranslatex:
+ return "glTranslatex";
+ case EntryPoint::GLUniform1d:
+ return "glUniform1d";
+ case EntryPoint::GLUniform1dv:
+ return "glUniform1dv";
+ case EntryPoint::GLUniform1f:
+ return "glUniform1f";
+ case EntryPoint::GLUniform1fv:
+ return "glUniform1fv";
+ case EntryPoint::GLUniform1i:
+ return "glUniform1i";
+ case EntryPoint::GLUniform1iv:
+ return "glUniform1iv";
+ case EntryPoint::GLUniform1ui:
+ return "glUniform1ui";
+ case EntryPoint::GLUniform1uiv:
+ return "glUniform1uiv";
+ case EntryPoint::GLUniform2d:
+ return "glUniform2d";
+ case EntryPoint::GLUniform2dv:
+ return "glUniform2dv";
+ case EntryPoint::GLUniform2f:
+ return "glUniform2f";
+ case EntryPoint::GLUniform2fv:
+ return "glUniform2fv";
+ case EntryPoint::GLUniform2i:
+ return "glUniform2i";
+ case EntryPoint::GLUniform2iv:
+ return "glUniform2iv";
+ case EntryPoint::GLUniform2ui:
+ return "glUniform2ui";
+ case EntryPoint::GLUniform2uiv:
+ return "glUniform2uiv";
+ case EntryPoint::GLUniform3d:
+ return "glUniform3d";
+ case EntryPoint::GLUniform3dv:
+ return "glUniform3dv";
+ case EntryPoint::GLUniform3f:
+ return "glUniform3f";
+ case EntryPoint::GLUniform3fv:
+ return "glUniform3fv";
+ case EntryPoint::GLUniform3i:
+ return "glUniform3i";
+ case EntryPoint::GLUniform3iv:
+ return "glUniform3iv";
+ case EntryPoint::GLUniform3ui:
+ return "glUniform3ui";
+ case EntryPoint::GLUniform3uiv:
+ return "glUniform3uiv";
+ case EntryPoint::GLUniform4d:
+ return "glUniform4d";
+ case EntryPoint::GLUniform4dv:
+ return "glUniform4dv";
+ case EntryPoint::GLUniform4f:
+ return "glUniform4f";
+ case EntryPoint::GLUniform4fv:
+ return "glUniform4fv";
+ case EntryPoint::GLUniform4i:
+ return "glUniform4i";
+ case EntryPoint::GLUniform4iv:
+ return "glUniform4iv";
+ case EntryPoint::GLUniform4ui:
+ return "glUniform4ui";
+ case EntryPoint::GLUniform4uiv:
+ return "glUniform4uiv";
+ case EntryPoint::GLUniformBlockBinding:
+ return "glUniformBlockBinding";
+ case EntryPoint::GLUniformMatrix2dv:
+ return "glUniformMatrix2dv";
+ case EntryPoint::GLUniformMatrix2fv:
+ return "glUniformMatrix2fv";
+ case EntryPoint::GLUniformMatrix2x3dv:
+ return "glUniformMatrix2x3dv";
+ case EntryPoint::GLUniformMatrix2x3fv:
+ return "glUniformMatrix2x3fv";
+ case EntryPoint::GLUniformMatrix2x4dv:
+ return "glUniformMatrix2x4dv";
+ case EntryPoint::GLUniformMatrix2x4fv:
+ return "glUniformMatrix2x4fv";
+ case EntryPoint::GLUniformMatrix3dv:
+ return "glUniformMatrix3dv";
+ case EntryPoint::GLUniformMatrix3fv:
+ return "glUniformMatrix3fv";
+ case EntryPoint::GLUniformMatrix3x2dv:
+ return "glUniformMatrix3x2dv";
+ case EntryPoint::GLUniformMatrix3x2fv:
+ return "glUniformMatrix3x2fv";
+ case EntryPoint::GLUniformMatrix3x4dv:
+ return "glUniformMatrix3x4dv";
+ case EntryPoint::GLUniformMatrix3x4fv:
+ return "glUniformMatrix3x4fv";
+ case EntryPoint::GLUniformMatrix4dv:
+ return "glUniformMatrix4dv";
+ case EntryPoint::GLUniformMatrix4fv:
+ return "glUniformMatrix4fv";
+ case EntryPoint::GLUniformMatrix4x2dv:
+ return "glUniformMatrix4x2dv";
+ case EntryPoint::GLUniformMatrix4x2fv:
+ return "glUniformMatrix4x2fv";
+ case EntryPoint::GLUniformMatrix4x3dv:
+ return "glUniformMatrix4x3dv";
+ case EntryPoint::GLUniformMatrix4x3fv:
+ return "glUniformMatrix4x3fv";
+ case EntryPoint::GLUniformSubroutinesuiv:
+ return "glUniformSubroutinesuiv";
+ case EntryPoint::GLUnmapBuffer:
+ return "glUnmapBuffer";
+ case EntryPoint::GLUnmapBufferOES:
+ return "glUnmapBufferOES";
+ case EntryPoint::GLUnmapNamedBuffer:
+ return "glUnmapNamedBuffer";
+ case EntryPoint::GLUseProgram:
+ return "glUseProgram";
+ case EntryPoint::GLUseProgramStages:
+ return "glUseProgramStages";
+ case EntryPoint::GLUseProgramStagesEXT:
+ return "glUseProgramStagesEXT";
+ case EntryPoint::GLValidateProgram:
+ return "glValidateProgram";
+ case EntryPoint::GLValidateProgramPipeline:
+ return "glValidateProgramPipeline";
+ case EntryPoint::GLValidateProgramPipelineEXT:
+ return "glValidateProgramPipelineEXT";
+ case EntryPoint::GLVertex2d:
+ return "glVertex2d";
+ case EntryPoint::GLVertex2dv:
+ return "glVertex2dv";
+ case EntryPoint::GLVertex2f:
+ return "glVertex2f";
+ case EntryPoint::GLVertex2fv:
+ return "glVertex2fv";
+ case EntryPoint::GLVertex2i:
+ return "glVertex2i";
+ case EntryPoint::GLVertex2iv:
+ return "glVertex2iv";
+ case EntryPoint::GLVertex2s:
+ return "glVertex2s";
+ case EntryPoint::GLVertex2sv:
+ return "glVertex2sv";
+ case EntryPoint::GLVertex3d:
+ return "glVertex3d";
+ case EntryPoint::GLVertex3dv:
+ return "glVertex3dv";
+ case EntryPoint::GLVertex3f:
+ return "glVertex3f";
+ case EntryPoint::GLVertex3fv:
+ return "glVertex3fv";
+ case EntryPoint::GLVertex3i:
+ return "glVertex3i";
+ case EntryPoint::GLVertex3iv:
+ return "glVertex3iv";
+ case EntryPoint::GLVertex3s:
+ return "glVertex3s";
+ case EntryPoint::GLVertex3sv:
+ return "glVertex3sv";
+ case EntryPoint::GLVertex4d:
+ return "glVertex4d";
+ case EntryPoint::GLVertex4dv:
+ return "glVertex4dv";
+ case EntryPoint::GLVertex4f:
+ return "glVertex4f";
+ case EntryPoint::GLVertex4fv:
+ return "glVertex4fv";
+ case EntryPoint::GLVertex4i:
+ return "glVertex4i";
+ case EntryPoint::GLVertex4iv:
+ return "glVertex4iv";
+ case EntryPoint::GLVertex4s:
+ return "glVertex4s";
+ case EntryPoint::GLVertex4sv:
+ return "glVertex4sv";
+ case EntryPoint::GLVertexArrayAttribBinding:
+ return "glVertexArrayAttribBinding";
+ case EntryPoint::GLVertexArrayAttribFormat:
+ return "glVertexArrayAttribFormat";
+ case EntryPoint::GLVertexArrayAttribIFormat:
+ return "glVertexArrayAttribIFormat";
+ case EntryPoint::GLVertexArrayAttribLFormat:
+ return "glVertexArrayAttribLFormat";
+ case EntryPoint::GLVertexArrayBindingDivisor:
+ return "glVertexArrayBindingDivisor";
+ case EntryPoint::GLVertexArrayElementBuffer:
+ return "glVertexArrayElementBuffer";
+ case EntryPoint::GLVertexArrayVertexBuffer:
+ return "glVertexArrayVertexBuffer";
+ case EntryPoint::GLVertexArrayVertexBuffers:
+ return "glVertexArrayVertexBuffers";
+ case EntryPoint::GLVertexAttrib1d:
+ return "glVertexAttrib1d";
+ case EntryPoint::GLVertexAttrib1dv:
+ return "glVertexAttrib1dv";
+ case EntryPoint::GLVertexAttrib1f:
+ return "glVertexAttrib1f";
+ case EntryPoint::GLVertexAttrib1fv:
+ return "glVertexAttrib1fv";
+ case EntryPoint::GLVertexAttrib1s:
+ return "glVertexAttrib1s";
+ case EntryPoint::GLVertexAttrib1sv:
+ return "glVertexAttrib1sv";
+ case EntryPoint::GLVertexAttrib2d:
+ return "glVertexAttrib2d";
+ case EntryPoint::GLVertexAttrib2dv:
+ return "glVertexAttrib2dv";
+ case EntryPoint::GLVertexAttrib2f:
+ return "glVertexAttrib2f";
+ case EntryPoint::GLVertexAttrib2fv:
+ return "glVertexAttrib2fv";
+ case EntryPoint::GLVertexAttrib2s:
+ return "glVertexAttrib2s";
+ case EntryPoint::GLVertexAttrib2sv:
+ return "glVertexAttrib2sv";
+ case EntryPoint::GLVertexAttrib3d:
+ return "glVertexAttrib3d";
+ case EntryPoint::GLVertexAttrib3dv:
+ return "glVertexAttrib3dv";
+ case EntryPoint::GLVertexAttrib3f:
+ return "glVertexAttrib3f";
+ case EntryPoint::GLVertexAttrib3fv:
+ return "glVertexAttrib3fv";
+ case EntryPoint::GLVertexAttrib3s:
+ return "glVertexAttrib3s";
+ case EntryPoint::GLVertexAttrib3sv:
+ return "glVertexAttrib3sv";
+ case EntryPoint::GLVertexAttrib4Nbv:
+ return "glVertexAttrib4Nbv";
+ case EntryPoint::GLVertexAttrib4Niv:
+ return "glVertexAttrib4Niv";
+ case EntryPoint::GLVertexAttrib4Nsv:
+ return "glVertexAttrib4Nsv";
+ case EntryPoint::GLVertexAttrib4Nub:
+ return "glVertexAttrib4Nub";
+ case EntryPoint::GLVertexAttrib4Nubv:
+ return "glVertexAttrib4Nubv";
+ case EntryPoint::GLVertexAttrib4Nuiv:
+ return "glVertexAttrib4Nuiv";
+ case EntryPoint::GLVertexAttrib4Nusv:
+ return "glVertexAttrib4Nusv";
+ case EntryPoint::GLVertexAttrib4bv:
+ return "glVertexAttrib4bv";
+ case EntryPoint::GLVertexAttrib4d:
+ return "glVertexAttrib4d";
+ case EntryPoint::GLVertexAttrib4dv:
+ return "glVertexAttrib4dv";
+ case EntryPoint::GLVertexAttrib4f:
+ return "glVertexAttrib4f";
+ case EntryPoint::GLVertexAttrib4fv:
+ return "glVertexAttrib4fv";
+ case EntryPoint::GLVertexAttrib4iv:
+ return "glVertexAttrib4iv";
+ case EntryPoint::GLVertexAttrib4s:
+ return "glVertexAttrib4s";
+ case EntryPoint::GLVertexAttrib4sv:
+ return "glVertexAttrib4sv";
+ case EntryPoint::GLVertexAttrib4ubv:
+ return "glVertexAttrib4ubv";
+ case EntryPoint::GLVertexAttrib4uiv:
+ return "glVertexAttrib4uiv";
+ case EntryPoint::GLVertexAttrib4usv:
+ return "glVertexAttrib4usv";
+ case EntryPoint::GLVertexAttribBinding:
+ return "glVertexAttribBinding";
+ case EntryPoint::GLVertexAttribDivisor:
+ return "glVertexAttribDivisor";
+ case EntryPoint::GLVertexAttribDivisorANGLE:
+ return "glVertexAttribDivisorANGLE";
+ case EntryPoint::GLVertexAttribDivisorEXT:
+ return "glVertexAttribDivisorEXT";
+ case EntryPoint::GLVertexAttribFormat:
+ return "glVertexAttribFormat";
+ case EntryPoint::GLVertexAttribI1i:
+ return "glVertexAttribI1i";
+ case EntryPoint::GLVertexAttribI1iv:
+ return "glVertexAttribI1iv";
+ case EntryPoint::GLVertexAttribI1ui:
+ return "glVertexAttribI1ui";
+ case EntryPoint::GLVertexAttribI1uiv:
+ return "glVertexAttribI1uiv";
+ case EntryPoint::GLVertexAttribI2i:
+ return "glVertexAttribI2i";
+ case EntryPoint::GLVertexAttribI2iv:
+ return "glVertexAttribI2iv";
+ case EntryPoint::GLVertexAttribI2ui:
+ return "glVertexAttribI2ui";
+ case EntryPoint::GLVertexAttribI2uiv:
+ return "glVertexAttribI2uiv";
+ case EntryPoint::GLVertexAttribI3i:
+ return "glVertexAttribI3i";
+ case EntryPoint::GLVertexAttribI3iv:
+ return "glVertexAttribI3iv";
+ case EntryPoint::GLVertexAttribI3ui:
+ return "glVertexAttribI3ui";
+ case EntryPoint::GLVertexAttribI3uiv:
+ return "glVertexAttribI3uiv";
+ case EntryPoint::GLVertexAttribI4bv:
+ return "glVertexAttribI4bv";
+ case EntryPoint::GLVertexAttribI4i:
+ return "glVertexAttribI4i";
+ case EntryPoint::GLVertexAttribI4iv:
+ return "glVertexAttribI4iv";
+ case EntryPoint::GLVertexAttribI4sv:
+ return "glVertexAttribI4sv";
+ case EntryPoint::GLVertexAttribI4ubv:
+ return "glVertexAttribI4ubv";
+ case EntryPoint::GLVertexAttribI4ui:
+ return "glVertexAttribI4ui";
+ case EntryPoint::GLVertexAttribI4uiv:
+ return "glVertexAttribI4uiv";
+ case EntryPoint::GLVertexAttribI4usv:
+ return "glVertexAttribI4usv";
+ case EntryPoint::GLVertexAttribIFormat:
+ return "glVertexAttribIFormat";
+ case EntryPoint::GLVertexAttribIPointer:
+ return "glVertexAttribIPointer";
+ case EntryPoint::GLVertexAttribL1d:
+ return "glVertexAttribL1d";
+ case EntryPoint::GLVertexAttribL1dv:
+ return "glVertexAttribL1dv";
+ case EntryPoint::GLVertexAttribL2d:
+ return "glVertexAttribL2d";
+ case EntryPoint::GLVertexAttribL2dv:
+ return "glVertexAttribL2dv";
+ case EntryPoint::GLVertexAttribL3d:
+ return "glVertexAttribL3d";
+ case EntryPoint::GLVertexAttribL3dv:
+ return "glVertexAttribL3dv";
+ case EntryPoint::GLVertexAttribL4d:
+ return "glVertexAttribL4d";
+ case EntryPoint::GLVertexAttribL4dv:
+ return "glVertexAttribL4dv";
+ case EntryPoint::GLVertexAttribLFormat:
+ return "glVertexAttribLFormat";
+ case EntryPoint::GLVertexAttribLPointer:
+ return "glVertexAttribLPointer";
+ case EntryPoint::GLVertexAttribP1ui:
+ return "glVertexAttribP1ui";
+ case EntryPoint::GLVertexAttribP1uiv:
+ return "glVertexAttribP1uiv";
+ case EntryPoint::GLVertexAttribP2ui:
+ return "glVertexAttribP2ui";
+ case EntryPoint::GLVertexAttribP2uiv:
+ return "glVertexAttribP2uiv";
+ case EntryPoint::GLVertexAttribP3ui:
+ return "glVertexAttribP3ui";
+ case EntryPoint::GLVertexAttribP3uiv:
+ return "glVertexAttribP3uiv";
+ case EntryPoint::GLVertexAttribP4ui:
+ return "glVertexAttribP4ui";
+ case EntryPoint::GLVertexAttribP4uiv:
+ return "glVertexAttribP4uiv";
+ case EntryPoint::GLVertexAttribPointer:
+ return "glVertexAttribPointer";
+ case EntryPoint::GLVertexBindingDivisor:
+ return "glVertexBindingDivisor";
+ case EntryPoint::GLVertexP2ui:
+ return "glVertexP2ui";
+ case EntryPoint::GLVertexP2uiv:
+ return "glVertexP2uiv";
+ case EntryPoint::GLVertexP3ui:
+ return "glVertexP3ui";
+ case EntryPoint::GLVertexP3uiv:
+ return "glVertexP3uiv";
+ case EntryPoint::GLVertexP4ui:
+ return "glVertexP4ui";
+ case EntryPoint::GLVertexP4uiv:
+ return "glVertexP4uiv";
+ case EntryPoint::GLVertexPointer:
+ return "glVertexPointer";
+ case EntryPoint::GLViewport:
+ return "glViewport";
+ case EntryPoint::GLViewportArrayv:
+ return "glViewportArrayv";
+ case EntryPoint::GLViewportIndexedf:
+ return "glViewportIndexedf";
+ case EntryPoint::GLViewportIndexedfv:
+ return "glViewportIndexedfv";
+ case EntryPoint::GLWaitSemaphoreEXT:
+ return "glWaitSemaphoreEXT";
+ case EntryPoint::GLWaitSync:
+ return "glWaitSync";
+ case EntryPoint::GLWeightPointerOES:
+ return "glWeightPointerOES";
+ case EntryPoint::GLWindowPos2d:
+ return "glWindowPos2d";
+ case EntryPoint::GLWindowPos2dv:
+ return "glWindowPos2dv";
+ case EntryPoint::GLWindowPos2f:
+ return "glWindowPos2f";
+ case EntryPoint::GLWindowPos2fv:
+ return "glWindowPos2fv";
+ case EntryPoint::GLWindowPos2i:
+ return "glWindowPos2i";
+ case EntryPoint::GLWindowPos2iv:
+ return "glWindowPos2iv";
+ case EntryPoint::GLWindowPos2s:
+ return "glWindowPos2s";
+ case EntryPoint::GLWindowPos2sv:
+ return "glWindowPos2sv";
+ case EntryPoint::GLWindowPos3d:
+ return "glWindowPos3d";
+ case EntryPoint::GLWindowPos3dv:
+ return "glWindowPos3dv";
+ case EntryPoint::GLWindowPos3f:
+ return "glWindowPos3f";
+ case EntryPoint::GLWindowPos3fv:
+ return "glWindowPos3fv";
+ case EntryPoint::GLWindowPos3i:
+ return "glWindowPos3i";
+ case EntryPoint::GLWindowPos3iv:
+ return "glWindowPos3iv";
+ case EntryPoint::GLWindowPos3s:
+ return "glWindowPos3s";
+ case EntryPoint::GLWindowPos3sv:
+ return "glWindowPos3sv";
+ case EntryPoint::WGLChoosePixelFormat:
+ return "wglChoosePixelFormat";
+ case EntryPoint::WGLCopyContext:
+ return "wglCopyContext";
+ case EntryPoint::WGLCreateContext:
+ return "wglCreateContext";
+ case EntryPoint::WGLCreateLayerContext:
+ return "wglCreateLayerContext";
+ case EntryPoint::WGLDeleteContext:
+ return "wglDeleteContext";
+ case EntryPoint::WGLDescribeLayerPlane:
+ return "wglDescribeLayerPlane";
+ case EntryPoint::WGLDescribePixelFormat:
+ return "wglDescribePixelFormat";
+ case EntryPoint::WGLGetCurrentContext:
+ return "wglGetCurrentContext";
+ case EntryPoint::WGLGetCurrentDC:
+ return "wglGetCurrentDC";
+ case EntryPoint::WGLGetEnhMetaFilePixelFormat:
+ return "wglGetEnhMetaFilePixelFormat";
+ case EntryPoint::WGLGetLayerPaletteEntries:
+ return "wglGetLayerPaletteEntries";
+ case EntryPoint::WGLGetPixelFormat:
+ return "wglGetPixelFormat";
+ case EntryPoint::WGLGetProcAddress:
+ return "wglGetProcAddress";
+ case EntryPoint::WGLMakeCurrent:
+ return "wglMakeCurrent";
+ case EntryPoint::WGLRealizeLayerPalette:
+ return "wglRealizeLayerPalette";
+ case EntryPoint::WGLSetLayerPaletteEntries:
+ return "wglSetLayerPaletteEntries";
+ case EntryPoint::WGLSetPixelFormat:
+ return "wglSetPixelFormat";
+ case EntryPoint::WGLShareLists:
+ return "wglShareLists";
+ case EntryPoint::WGLSwapBuffers:
+ return "wglSwapBuffers";
+ case EntryPoint::WGLSwapLayerBuffers:
+ return "wglSwapLayerBuffers";
+ case EntryPoint::WGLUseFontBitmaps:
+ return "wglUseFontBitmaps";
+ case EntryPoint::WGLUseFontBitmapsA:
+ return "wglUseFontBitmapsA";
+ case EntryPoint::WGLUseFontBitmapsW:
+ return "wglUseFontBitmapsW";
+ case EntryPoint::WGLUseFontOutlines:
+ return "wglUseFontOutlines";
+ case EntryPoint::WGLUseFontOutlinesA:
+ return "wglUseFontOutlinesA";
+ case EntryPoint::WGLUseFontOutlinesW:
+ return "wglUseFontOutlinesW";
+ default:
+ UNREACHABLE();
+ return "error";
+ }
+}
+} // namespace angle
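
The GetEntryPointName switch above maps each EntryPoint enumerator back to the literal GL/EGL/WGL command string, which is what ANGLE's debug and trace output is built on. A minimal usage sketch (the logCall helper is hypothetical and not part of the file above):

    #include <cstdio>

    #include "common/entry_points_enum_autogen.h"

    // Hypothetical helper: prints the command name behind a captured entry point.
    void logCall(angle::EntryPoint ep)
    {
        // GetEntryPointName returns a string literal such as "glDrawArrays".
        std::printf("ANGLE call: %s\n", angle::GetEntryPointName(ep));
    }

    // logCall(angle::EntryPoint::GLDrawArrays) prints "ANGLE call: glDrawArrays".
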
diff --git a/gfx/angle/checkout/src/common/entry_points_enum_autogen.h b/gfx/angle/checkout/src/common/entry_points_enum_autogen.h
new file mode 100644
index 0000000000..61c5c76f58
--- /dev/null
+++ b/gfx/angle/checkout/src/common/entry_points_enum_autogen.h
@@ -0,0 +1,1736 @@
+// GENERATED FILE - DO NOT EDIT.
+// Generated by generate_entry_points.py using data from gl.xml and gl_angle_ext.xml.
+//
+// Copyright 2020 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// entry_points_enum_autogen.h:
+// Defines the GL/GLES entry points enumeration.
+
+#ifndef COMMON_ENTRYPOINTSENUM_AUTOGEN_H_
+#define COMMON_ENTRYPOINTSENUM_AUTOGEN_H_
+
+namespace angle
+{
+enum class EntryPoint
+{
+ CLBuildProgram,
+ CLCloneKernel,
+ CLCompileProgram,
+ CLCreateBuffer,
+ CLCreateBufferWithProperties,
+ CLCreateCommandQueue,
+ CLCreateCommandQueueWithProperties,
+ CLCreateContext,
+ CLCreateContextFromType,
+ CLCreateImage,
+ CLCreateImage2D,
+ CLCreateImage3D,
+ CLCreateImageWithProperties,
+ CLCreateKernel,
+ CLCreateKernelsInProgram,
+ CLCreatePipe,
+ CLCreateProgramWithBinary,
+ CLCreateProgramWithBuiltInKernels,
+ CLCreateProgramWithIL,
+ CLCreateProgramWithSource,
+ CLCreateSampler,
+ CLCreateSamplerWithProperties,
+ CLCreateSubBuffer,
+ CLCreateSubDevices,
+ CLCreateUserEvent,
+ CLEnqueueBarrier,
+ CLEnqueueBarrierWithWaitList,
+ CLEnqueueCopyBuffer,
+ CLEnqueueCopyBufferRect,
+ CLEnqueueCopyBufferToImage,
+ CLEnqueueCopyImage,
+ CLEnqueueCopyImageToBuffer,
+ CLEnqueueFillBuffer,
+ CLEnqueueFillImage,
+ CLEnqueueMapBuffer,
+ CLEnqueueMapImage,
+ CLEnqueueMarker,
+ CLEnqueueMarkerWithWaitList,
+ CLEnqueueMigrateMemObjects,
+ CLEnqueueNDRangeKernel,
+ CLEnqueueNativeKernel,
+ CLEnqueueReadBuffer,
+ CLEnqueueReadBufferRect,
+ CLEnqueueReadImage,
+ CLEnqueueSVMFree,
+ CLEnqueueSVMMap,
+ CLEnqueueSVMMemFill,
+ CLEnqueueSVMMemcpy,
+ CLEnqueueSVMMigrateMem,
+ CLEnqueueSVMUnmap,
+ CLEnqueueTask,
+ CLEnqueueUnmapMemObject,
+ CLEnqueueWaitForEvents,
+ CLEnqueueWriteBuffer,
+ CLEnqueueWriteBufferRect,
+ CLEnqueueWriteImage,
+ CLFinish,
+ CLFlush,
+ CLGetCommandQueueInfo,
+ CLGetContextInfo,
+ CLGetDeviceAndHostTimer,
+ CLGetDeviceIDs,
+ CLGetDeviceInfo,
+ CLGetEventInfo,
+ CLGetEventProfilingInfo,
+ CLGetExtensionFunctionAddress,
+ CLGetExtensionFunctionAddressForPlatform,
+ CLGetHostTimer,
+ CLGetImageInfo,
+ CLGetKernelArgInfo,
+ CLGetKernelInfo,
+ CLGetKernelSubGroupInfo,
+ CLGetKernelWorkGroupInfo,
+ CLGetMemObjectInfo,
+ CLGetPipeInfo,
+ CLGetPlatformIDs,
+ CLGetPlatformInfo,
+ CLGetProgramBuildInfo,
+ CLGetProgramInfo,
+ CLGetSamplerInfo,
+ CLGetSupportedImageFormats,
+ CLIcdGetPlatformIDsKHR,
+ CLLinkProgram,
+ CLReleaseCommandQueue,
+ CLReleaseContext,
+ CLReleaseDevice,
+ CLReleaseEvent,
+ CLReleaseKernel,
+ CLReleaseMemObject,
+ CLReleaseProgram,
+ CLReleaseSampler,
+ CLRetainCommandQueue,
+ CLRetainContext,
+ CLRetainDevice,
+ CLRetainEvent,
+ CLRetainKernel,
+ CLRetainMemObject,
+ CLRetainProgram,
+ CLRetainSampler,
+ CLSVMAlloc,
+ CLSVMFree,
+ CLSetCommandQueueProperty,
+ CLSetContextDestructorCallback,
+ CLSetDefaultDeviceCommandQueue,
+ CLSetEventCallback,
+ CLSetKernelArg,
+ CLSetKernelArgSVMPointer,
+ CLSetKernelExecInfo,
+ CLSetMemObjectDestructorCallback,
+ CLSetProgramReleaseCallback,
+ CLSetProgramSpecializationConstant,
+ CLSetUserEventStatus,
+ CLUnloadCompiler,
+ CLUnloadPlatformCompiler,
+ CLWaitForEvents,
+ EGLBindAPI,
+ EGLBindTexImage,
+ EGLChooseConfig,
+ EGLClientWaitSync,
+ EGLClientWaitSyncKHR,
+ EGLCopyBuffers,
+ EGLCopyMetalSharedEventANGLE,
+ EGLCreateContext,
+ EGLCreateDeviceANGLE,
+ EGLCreateImage,
+ EGLCreateImageKHR,
+ EGLCreateNativeClientBufferANDROID,
+ EGLCreatePbufferFromClientBuffer,
+ EGLCreatePbufferSurface,
+ EGLCreatePixmapSurface,
+ EGLCreatePlatformPixmapSurface,
+ EGLCreatePlatformPixmapSurfaceEXT,
+ EGLCreatePlatformWindowSurface,
+ EGLCreatePlatformWindowSurfaceEXT,
+ EGLCreateStreamKHR,
+ EGLCreateStreamProducerD3DTextureANGLE,
+ EGLCreateSync,
+ EGLCreateSyncKHR,
+ EGLCreateWindowSurface,
+ EGLDebugMessageControlKHR,
+ EGLDestroyContext,
+ EGLDestroyImage,
+ EGLDestroyImageKHR,
+ EGLDestroyStreamKHR,
+ EGLDestroySurface,
+ EGLDestroySync,
+ EGLDestroySyncKHR,
+ EGLDupNativeFenceFDANDROID,
+ EGLExportVkImageANGLE,
+ EGLForceGPUSwitchANGLE,
+ EGLGetCompositorTimingANDROID,
+ EGLGetCompositorTimingSupportedANDROID,
+ EGLGetConfigAttrib,
+ EGLGetConfigs,
+ EGLGetCurrentContext,
+ EGLGetCurrentDisplay,
+ EGLGetCurrentSurface,
+ EGLGetDisplay,
+ EGLGetError,
+ EGLGetFrameTimestampSupportedANDROID,
+ EGLGetFrameTimestampsANDROID,
+ EGLGetMscRateANGLE,
+ EGLGetNativeClientBufferANDROID,
+ EGLGetNextFrameIdANDROID,
+ EGLGetPlatformDisplay,
+ EGLGetPlatformDisplayEXT,
+ EGLGetProcAddress,
+ EGLGetSyncAttrib,
+ EGLGetSyncAttribKHR,
+ EGLGetSyncValuesCHROMIUM,
+ EGLHandleGPUSwitchANGLE,
+ EGLInitialize,
+ EGLLabelObjectKHR,
+ EGLLockSurfaceKHR,
+ EGLMakeCurrent,
+ EGLPostSubBufferNV,
+ EGLPrepareSwapBuffersANGLE,
+ EGLPresentationTimeANDROID,
+ EGLProgramCacheGetAttribANGLE,
+ EGLProgramCachePopulateANGLE,
+ EGLProgramCacheQueryANGLE,
+ EGLProgramCacheResizeANGLE,
+ EGLQueryAPI,
+ EGLQueryContext,
+ EGLQueryDebugKHR,
+ EGLQueryDeviceAttribEXT,
+ EGLQueryDeviceStringEXT,
+ EGLQueryDisplayAttribANGLE,
+ EGLQueryDisplayAttribEXT,
+ EGLQueryDmaBufFormatsEXT,
+ EGLQueryDmaBufModifiersEXT,
+ EGLQueryStreamKHR,
+ EGLQueryStreamu64KHR,
+ EGLQueryString,
+ EGLQueryStringiANGLE,
+ EGLQuerySurface,
+ EGLQuerySurface64KHR,
+ EGLQuerySurfacePointerANGLE,
+ EGLReacquireHighPowerGPUANGLE,
+ EGLReleaseDeviceANGLE,
+ EGLReleaseHighPowerGPUANGLE,
+ EGLReleaseTexImage,
+ EGLReleaseThread,
+ EGLSetBlobCacheFuncsANDROID,
+ EGLSetDamageRegionKHR,
+ EGLSignalSyncKHR,
+ EGLStreamAttribKHR,
+ EGLStreamConsumerAcquireKHR,
+ EGLStreamConsumerGLTextureExternalAttribsNV,
+ EGLStreamConsumerGLTextureExternalKHR,
+ EGLStreamConsumerReleaseKHR,
+ EGLStreamPostD3DTextureANGLE,
+ EGLSurfaceAttrib,
+ EGLSwapBuffers,
+ EGLSwapBuffersWithDamageKHR,
+ EGLSwapBuffersWithFrameTokenANGLE,
+ EGLSwapInterval,
+ EGLTerminate,
+ EGLUnlockSurfaceKHR,
+ EGLWaitClient,
+ EGLWaitGL,
+ EGLWaitNative,
+ EGLWaitSync,
+ EGLWaitSyncKHR,
+ GLAccum,
+ GLAcquireTexturesANGLE,
+ GLActiveShaderProgram,
+ GLActiveShaderProgramEXT,
+ GLActiveTexture,
+ GLAlphaFunc,
+ GLAlphaFuncx,
+ GLAreTexturesResident,
+ GLArrayElement,
+ GLAttachShader,
+ GLBegin,
+ GLBeginConditionalRender,
+ GLBeginPerfMonitorAMD,
+ GLBeginPixelLocalStorageANGLE,
+ GLBeginQuery,
+ GLBeginQueryEXT,
+ GLBeginQueryIndexed,
+ GLBeginTransformFeedback,
+ GLBindAttribLocation,
+ GLBindBuffer,
+ GLBindBufferBase,
+ GLBindBufferRange,
+ GLBindBuffersBase,
+ GLBindBuffersRange,
+ GLBindFragDataLocation,
+ GLBindFragDataLocationEXT,
+ GLBindFragDataLocationIndexed,
+ GLBindFragDataLocationIndexedEXT,
+ GLBindFramebuffer,
+ GLBindFramebufferOES,
+ GLBindImageTexture,
+ GLBindImageTextures,
+ GLBindProgramPipeline,
+ GLBindProgramPipelineEXT,
+ GLBindRenderbuffer,
+ GLBindRenderbufferOES,
+ GLBindSampler,
+ GLBindSamplers,
+ GLBindTexture,
+ GLBindTextureUnit,
+ GLBindTextures,
+ GLBindTransformFeedback,
+ GLBindUniformLocationCHROMIUM,
+ GLBindVertexArray,
+ GLBindVertexArrayOES,
+ GLBindVertexBuffer,
+ GLBindVertexBuffers,
+ GLBitmap,
+ GLBlendBarrier,
+ GLBlendBarrierKHR,
+ GLBlendColor,
+ GLBlendEquation,
+ GLBlendEquationSeparate,
+ GLBlendEquationSeparatei,
+ GLBlendEquationSeparateiEXT,
+ GLBlendEquationSeparateiOES,
+ GLBlendEquationi,
+ GLBlendEquationiEXT,
+ GLBlendEquationiOES,
+ GLBlendFunc,
+ GLBlendFuncSeparate,
+ GLBlendFuncSeparatei,
+ GLBlendFuncSeparateiEXT,
+ GLBlendFuncSeparateiOES,
+ GLBlendFunci,
+ GLBlendFunciEXT,
+ GLBlendFunciOES,
+ GLBlitFramebuffer,
+ GLBlitFramebufferANGLE,
+ GLBlitFramebufferNV,
+ GLBlitNamedFramebuffer,
+ GLBufferData,
+ GLBufferStorage,
+ GLBufferStorageEXT,
+ GLBufferStorageExternalEXT,
+ GLBufferStorageMemEXT,
+ GLBufferSubData,
+ GLCallList,
+ GLCallLists,
+ GLCheckFramebufferStatus,
+ GLCheckFramebufferStatusOES,
+ GLCheckNamedFramebufferStatus,
+ GLClampColor,
+ GLClear,
+ GLClearAccum,
+ GLClearBufferData,
+ GLClearBufferSubData,
+ GLClearBufferfi,
+ GLClearBufferfv,
+ GLClearBufferiv,
+ GLClearBufferuiv,
+ GLClearColor,
+ GLClearColorx,
+ GLClearDepth,
+ GLClearDepthf,
+ GLClearDepthx,
+ GLClearIndex,
+ GLClearNamedBufferData,
+ GLClearNamedBufferSubData,
+ GLClearNamedFramebufferfi,
+ GLClearNamedFramebufferfv,
+ GLClearNamedFramebufferiv,
+ GLClearNamedFramebufferuiv,
+ GLClearStencil,
+ GLClearTexImage,
+ GLClearTexSubImage,
+ GLClientActiveTexture,
+ GLClientWaitSync,
+ GLClipControl,
+ GLClipControlEXT,
+ GLClipPlane,
+ GLClipPlanef,
+ GLClipPlanex,
+ GLColor3b,
+ GLColor3bv,
+ GLColor3d,
+ GLColor3dv,
+ GLColor3f,
+ GLColor3fv,
+ GLColor3i,
+ GLColor3iv,
+ GLColor3s,
+ GLColor3sv,
+ GLColor3ub,
+ GLColor3ubv,
+ GLColor3ui,
+ GLColor3uiv,
+ GLColor3us,
+ GLColor3usv,
+ GLColor4b,
+ GLColor4bv,
+ GLColor4d,
+ GLColor4dv,
+ GLColor4f,
+ GLColor4fv,
+ GLColor4i,
+ GLColor4iv,
+ GLColor4s,
+ GLColor4sv,
+ GLColor4ub,
+ GLColor4ubv,
+ GLColor4ui,
+ GLColor4uiv,
+ GLColor4us,
+ GLColor4usv,
+ GLColor4x,
+ GLColorMask,
+ GLColorMaski,
+ GLColorMaskiEXT,
+ GLColorMaskiOES,
+ GLColorMaterial,
+ GLColorP3ui,
+ GLColorP3uiv,
+ GLColorP4ui,
+ GLColorP4uiv,
+ GLColorPointer,
+ GLCompileShader,
+ GLCompressedCopyTextureCHROMIUM,
+ GLCompressedTexImage1D,
+ GLCompressedTexImage2D,
+ GLCompressedTexImage2DRobustANGLE,
+ GLCompressedTexImage3D,
+ GLCompressedTexImage3DOES,
+ GLCompressedTexImage3DRobustANGLE,
+ GLCompressedTexSubImage1D,
+ GLCompressedTexSubImage2D,
+ GLCompressedTexSubImage2DRobustANGLE,
+ GLCompressedTexSubImage3D,
+ GLCompressedTexSubImage3DOES,
+ GLCompressedTexSubImage3DRobustANGLE,
+ GLCompressedTextureSubImage1D,
+ GLCompressedTextureSubImage2D,
+ GLCompressedTextureSubImage3D,
+ GLCopyBufferSubData,
+ GLCopyImageSubData,
+ GLCopyImageSubDataEXT,
+ GLCopyImageSubDataOES,
+ GLCopyNamedBufferSubData,
+ GLCopyPixels,
+ GLCopySubTexture3DANGLE,
+ GLCopySubTextureCHROMIUM,
+ GLCopyTexImage1D,
+ GLCopyTexImage2D,
+ GLCopyTexSubImage1D,
+ GLCopyTexSubImage2D,
+ GLCopyTexSubImage3D,
+ GLCopyTexSubImage3DOES,
+ GLCopyTexture3DANGLE,
+ GLCopyTextureCHROMIUM,
+ GLCopyTextureSubImage1D,
+ GLCopyTextureSubImage2D,
+ GLCopyTextureSubImage3D,
+ GLCoverageModulationCHROMIUM,
+ GLCreateBuffers,
+ GLCreateFramebuffers,
+ GLCreateMemoryObjectsEXT,
+ GLCreateProgram,
+ GLCreateProgramPipelines,
+ GLCreateQueries,
+ GLCreateRenderbuffers,
+ GLCreateSamplers,
+ GLCreateShader,
+ GLCreateShaderProgramv,
+ GLCreateShaderProgramvEXT,
+ GLCreateTextures,
+ GLCreateTransformFeedbacks,
+ GLCreateVertexArrays,
+ GLCullFace,
+ GLCurrentPaletteMatrixOES,
+ GLDebugMessageCallback,
+ GLDebugMessageCallbackKHR,
+ GLDebugMessageControl,
+ GLDebugMessageControlKHR,
+ GLDebugMessageInsert,
+ GLDebugMessageInsertKHR,
+ GLDeleteBuffers,
+ GLDeleteFencesNV,
+ GLDeleteFramebuffers,
+ GLDeleteFramebuffersOES,
+ GLDeleteLists,
+ GLDeleteMemoryObjectsEXT,
+ GLDeletePerfMonitorsAMD,
+ GLDeleteProgram,
+ GLDeleteProgramPipelines,
+ GLDeleteProgramPipelinesEXT,
+ GLDeleteQueries,
+ GLDeleteQueriesEXT,
+ GLDeleteRenderbuffers,
+ GLDeleteRenderbuffersOES,
+ GLDeleteSamplers,
+ GLDeleteSemaphoresEXT,
+ GLDeleteShader,
+ GLDeleteSync,
+ GLDeleteTextures,
+ GLDeleteTransformFeedbacks,
+ GLDeleteVertexArrays,
+ GLDeleteVertexArraysOES,
+ GLDepthFunc,
+ GLDepthMask,
+ GLDepthRange,
+ GLDepthRangeArrayv,
+ GLDepthRangeIndexed,
+ GLDepthRangef,
+ GLDepthRangex,
+ GLDetachShader,
+ GLDisable,
+ GLDisableClientState,
+ GLDisableExtensionANGLE,
+ GLDisableVertexArrayAttrib,
+ GLDisableVertexAttribArray,
+ GLDisablei,
+ GLDisableiEXT,
+ GLDisableiOES,
+ GLDiscardFramebufferEXT,
+ GLDispatchCompute,
+ GLDispatchComputeIndirect,
+ GLDrawArrays,
+ GLDrawArraysIndirect,
+ GLDrawArraysInstanced,
+ GLDrawArraysInstancedANGLE,
+ GLDrawArraysInstancedBaseInstance,
+ GLDrawArraysInstancedBaseInstanceANGLE,
+ GLDrawArraysInstancedBaseInstanceEXT,
+ GLDrawArraysInstancedEXT,
+ GLDrawBuffer,
+ GLDrawBuffers,
+ GLDrawBuffersEXT,
+ GLDrawElements,
+ GLDrawElementsBaseVertex,
+ GLDrawElementsBaseVertexEXT,
+ GLDrawElementsBaseVertexOES,
+ GLDrawElementsIndirect,
+ GLDrawElementsInstanced,
+ GLDrawElementsInstancedANGLE,
+ GLDrawElementsInstancedBaseInstance,
+ GLDrawElementsInstancedBaseInstanceEXT,
+ GLDrawElementsInstancedBaseVertex,
+ GLDrawElementsInstancedBaseVertexBaseInstance,
+ GLDrawElementsInstancedBaseVertexBaseInstanceANGLE,
+ GLDrawElementsInstancedBaseVertexBaseInstanceEXT,
+ GLDrawElementsInstancedBaseVertexEXT,
+ GLDrawElementsInstancedBaseVertexOES,
+ GLDrawElementsInstancedEXT,
+ GLDrawPixels,
+ GLDrawRangeElements,
+ GLDrawRangeElementsBaseVertex,
+ GLDrawRangeElementsBaseVertexEXT,
+ GLDrawRangeElementsBaseVertexOES,
+ GLDrawTexfOES,
+ GLDrawTexfvOES,
+ GLDrawTexiOES,
+ GLDrawTexivOES,
+ GLDrawTexsOES,
+ GLDrawTexsvOES,
+ GLDrawTexxOES,
+ GLDrawTexxvOES,
+ GLDrawTransformFeedback,
+ GLDrawTransformFeedbackInstanced,
+ GLDrawTransformFeedbackStream,
+ GLDrawTransformFeedbackStreamInstanced,
+ GLEGLImageTargetRenderbufferStorageOES,
+ GLEGLImageTargetTexStorageEXT,
+ GLEGLImageTargetTexture2DOES,
+ GLEGLImageTargetTextureStorageEXT,
+ GLEdgeFlag,
+ GLEdgeFlagPointer,
+ GLEdgeFlagv,
+ GLEnable,
+ GLEnableClientState,
+ GLEnableVertexArrayAttrib,
+ GLEnableVertexAttribArray,
+ GLEnablei,
+ GLEnableiEXT,
+ GLEnableiOES,
+ GLEnd,
+ GLEndConditionalRender,
+ GLEndList,
+ GLEndPerfMonitorAMD,
+ GLEndPixelLocalStorageANGLE,
+ GLEndQuery,
+ GLEndQueryEXT,
+ GLEndQueryIndexed,
+ GLEndTransformFeedback,
+ GLEvalCoord1d,
+ GLEvalCoord1dv,
+ GLEvalCoord1f,
+ GLEvalCoord1fv,
+ GLEvalCoord2d,
+ GLEvalCoord2dv,
+ GLEvalCoord2f,
+ GLEvalCoord2fv,
+ GLEvalMesh1,
+ GLEvalMesh2,
+ GLEvalPoint1,
+ GLEvalPoint2,
+ GLFeedbackBuffer,
+ GLFenceSync,
+ GLFinish,
+ GLFinishFenceNV,
+ GLFlush,
+ GLFlushMappedBufferRange,
+ GLFlushMappedBufferRangeEXT,
+ GLFlushMappedNamedBufferRange,
+ GLFogCoordPointer,
+ GLFogCoordd,
+ GLFogCoorddv,
+ GLFogCoordf,
+ GLFogCoordfv,
+ GLFogf,
+ GLFogfv,
+ GLFogi,
+ GLFogiv,
+ GLFogx,
+ GLFogxv,
+ GLFramebufferFetchBarrierEXT,
+ GLFramebufferMemorylessPixelLocalStorageANGLE,
+ GLFramebufferParameteri,
+ GLFramebufferParameteriMESA,
+ GLFramebufferRenderbuffer,
+ GLFramebufferRenderbufferOES,
+ GLFramebufferTexture,
+ GLFramebufferTexture1D,
+ GLFramebufferTexture2D,
+ GLFramebufferTexture2DMultisampleEXT,
+ GLFramebufferTexture2DOES,
+ GLFramebufferTexture3D,
+ GLFramebufferTexture3DOES,
+ GLFramebufferTextureEXT,
+ GLFramebufferTextureLayer,
+ GLFramebufferTextureMultiviewOVR,
+ GLFramebufferTextureOES,
+ GLFramebufferTexturePixelLocalStorageANGLE,
+ GLFrontFace,
+ GLFrustum,
+ GLFrustumf,
+ GLFrustumx,
+ GLGenBuffers,
+ GLGenFencesNV,
+ GLGenFramebuffers,
+ GLGenFramebuffersOES,
+ GLGenLists,
+ GLGenPerfMonitorsAMD,
+ GLGenProgramPipelines,
+ GLGenProgramPipelinesEXT,
+ GLGenQueries,
+ GLGenQueriesEXT,
+ GLGenRenderbuffers,
+ GLGenRenderbuffersOES,
+ GLGenSamplers,
+ GLGenSemaphoresEXT,
+ GLGenTextures,
+ GLGenTransformFeedbacks,
+ GLGenVertexArrays,
+ GLGenVertexArraysOES,
+ GLGenerateMipmap,
+ GLGenerateMipmapOES,
+ GLGenerateTextureMipmap,
+ GLGetActiveAtomicCounterBufferiv,
+ GLGetActiveAttrib,
+ GLGetActiveSubroutineName,
+ GLGetActiveSubroutineUniformName,
+ GLGetActiveSubroutineUniformiv,
+ GLGetActiveUniform,
+ GLGetActiveUniformBlockName,
+ GLGetActiveUniformBlockiv,
+ GLGetActiveUniformBlockivRobustANGLE,
+ GLGetActiveUniformName,
+ GLGetActiveUniformsiv,
+ GLGetAttachedShaders,
+ GLGetAttribLocation,
+ GLGetBooleani_v,
+ GLGetBooleani_vRobustANGLE,
+ GLGetBooleanv,
+ GLGetBooleanvRobustANGLE,
+ GLGetBufferParameteri64v,
+ GLGetBufferParameteri64vRobustANGLE,
+ GLGetBufferParameteriv,
+ GLGetBufferParameterivRobustANGLE,
+ GLGetBufferPointerv,
+ GLGetBufferPointervOES,
+ GLGetBufferPointervRobustANGLE,
+ GLGetBufferSubData,
+ GLGetClipPlane,
+ GLGetClipPlanef,
+ GLGetClipPlanex,
+ GLGetCompressedTexImage,
+ GLGetCompressedTexImageANGLE,
+ GLGetCompressedTextureImage,
+ GLGetCompressedTextureSubImage,
+ GLGetDebugMessageLog,
+ GLGetDebugMessageLogKHR,
+ GLGetDoublei_v,
+ GLGetDoublev,
+ GLGetError,
+ GLGetFenceivNV,
+ GLGetFixedv,
+ GLGetFloati_v,
+ GLGetFloatv,
+ GLGetFloatvRobustANGLE,
+ GLGetFragDataIndex,
+ GLGetFragDataIndexEXT,
+ GLGetFragDataLocation,
+ GLGetFramebufferAttachmentParameteriv,
+ GLGetFramebufferAttachmentParameterivOES,
+ GLGetFramebufferAttachmentParameterivRobustANGLE,
+ GLGetFramebufferParameteriv,
+ GLGetFramebufferParameterivMESA,
+ GLGetFramebufferParameterivRobustANGLE,
+ GLGetGraphicsResetStatus,
+ GLGetGraphicsResetStatusEXT,
+ GLGetInteger64i_v,
+ GLGetInteger64i_vRobustANGLE,
+ GLGetInteger64v,
+ GLGetInteger64vEXT,
+ GLGetInteger64vRobustANGLE,
+ GLGetIntegeri_v,
+ GLGetIntegeri_vRobustANGLE,
+ GLGetIntegerv,
+ GLGetIntegervRobustANGLE,
+ GLGetInternalformati64v,
+ GLGetInternalformativ,
+ GLGetInternalformativRobustANGLE,
+ GLGetLightfv,
+ GLGetLightiv,
+ GLGetLightxv,
+ GLGetMapdv,
+ GLGetMapfv,
+ GLGetMapiv,
+ GLGetMaterialfv,
+ GLGetMaterialiv,
+ GLGetMaterialxv,
+ GLGetMemoryObjectParameterivEXT,
+ GLGetMultisamplefv,
+ GLGetMultisamplefvANGLE,
+ GLGetMultisamplefvRobustANGLE,
+ GLGetNamedBufferParameteri64v,
+ GLGetNamedBufferParameteriv,
+ GLGetNamedBufferPointerv,
+ GLGetNamedBufferSubData,
+ GLGetNamedFramebufferAttachmentParameteriv,
+ GLGetNamedFramebufferParameteriv,
+ GLGetNamedRenderbufferParameteriv,
+ GLGetObjectLabel,
+ GLGetObjectLabelEXT,
+ GLGetObjectLabelKHR,
+ GLGetObjectPtrLabel,
+ GLGetObjectPtrLabelKHR,
+ GLGetPerfMonitorCounterDataAMD,
+ GLGetPerfMonitorCounterInfoAMD,
+ GLGetPerfMonitorCounterStringAMD,
+ GLGetPerfMonitorCountersAMD,
+ GLGetPerfMonitorGroupStringAMD,
+ GLGetPerfMonitorGroupsAMD,
+ GLGetPixelMapfv,
+ GLGetPixelMapuiv,
+ GLGetPixelMapusv,
+ GLGetPointerv,
+ GLGetPointervKHR,
+ GLGetPointervRobustANGLERobustANGLE,
+ GLGetPolygonStipple,
+ GLGetProgramBinary,
+ GLGetProgramBinaryOES,
+ GLGetProgramInfoLog,
+ GLGetProgramInterfaceiv,
+ GLGetProgramInterfaceivRobustANGLE,
+ GLGetProgramPipelineInfoLog,
+ GLGetProgramPipelineInfoLogEXT,
+ GLGetProgramPipelineiv,
+ GLGetProgramPipelineivEXT,
+ GLGetProgramResourceIndex,
+ GLGetProgramResourceLocation,
+ GLGetProgramResourceLocationIndex,
+ GLGetProgramResourceLocationIndexEXT,
+ GLGetProgramResourceName,
+ GLGetProgramResourceiv,
+ GLGetProgramStageiv,
+ GLGetProgramiv,
+ GLGetProgramivRobustANGLE,
+ GLGetQueryBufferObjecti64v,
+ GLGetQueryBufferObjectiv,
+ GLGetQueryBufferObjectui64v,
+ GLGetQueryBufferObjectuiv,
+ GLGetQueryIndexediv,
+ GLGetQueryObjecti64v,
+ GLGetQueryObjecti64vEXT,
+ GLGetQueryObjecti64vRobustANGLE,
+ GLGetQueryObjectiv,
+ GLGetQueryObjectivEXT,
+ GLGetQueryObjectivRobustANGLE,
+ GLGetQueryObjectui64v,
+ GLGetQueryObjectui64vEXT,
+ GLGetQueryObjectui64vRobustANGLE,
+ GLGetQueryObjectuiv,
+ GLGetQueryObjectuivEXT,
+ GLGetQueryObjectuivRobustANGLE,
+ GLGetQueryiv,
+ GLGetQueryivEXT,
+ GLGetQueryivRobustANGLE,
+ GLGetRenderbufferImageANGLE,
+ GLGetRenderbufferParameteriv,
+ GLGetRenderbufferParameterivOES,
+ GLGetRenderbufferParameterivRobustANGLE,
+ GLGetSamplerParameterIiv,
+ GLGetSamplerParameterIivEXT,
+ GLGetSamplerParameterIivOES,
+ GLGetSamplerParameterIivRobustANGLE,
+ GLGetSamplerParameterIuiv,
+ GLGetSamplerParameterIuivEXT,
+ GLGetSamplerParameterIuivOES,
+ GLGetSamplerParameterIuivRobustANGLE,
+ GLGetSamplerParameterfv,
+ GLGetSamplerParameterfvRobustANGLE,
+ GLGetSamplerParameteriv,
+ GLGetSamplerParameterivRobustANGLE,
+ GLGetSemaphoreParameterui64vEXT,
+ GLGetShaderInfoLog,
+ GLGetShaderPrecisionFormat,
+ GLGetShaderSource,
+ GLGetShaderiv,
+ GLGetShaderivRobustANGLE,
+ GLGetString,
+ GLGetStringi,
+ GLGetSubroutineIndex,
+ GLGetSubroutineUniformLocation,
+ GLGetSynciv,
+ GLGetTexEnvfv,
+ GLGetTexEnviv,
+ GLGetTexEnvxv,
+ GLGetTexGendv,
+ GLGetTexGenfv,
+ GLGetTexGenfvOES,
+ GLGetTexGeniv,
+ GLGetTexGenivOES,
+ GLGetTexGenxvOES,
+ GLGetTexImage,
+ GLGetTexImageANGLE,
+ GLGetTexLevelParameterfv,
+ GLGetTexLevelParameterfvANGLE,
+ GLGetTexLevelParameterfvRobustANGLE,
+ GLGetTexLevelParameteriv,
+ GLGetTexLevelParameterivANGLE,
+ GLGetTexLevelParameterivRobustANGLE,
+ GLGetTexParameterIiv,
+ GLGetTexParameterIivEXT,
+ GLGetTexParameterIivOES,
+ GLGetTexParameterIivRobustANGLE,
+ GLGetTexParameterIuiv,
+ GLGetTexParameterIuivEXT,
+ GLGetTexParameterIuivOES,
+ GLGetTexParameterIuivRobustANGLE,
+ GLGetTexParameterfv,
+ GLGetTexParameterfvRobustANGLE,
+ GLGetTexParameteriv,
+ GLGetTexParameterivRobustANGLE,
+ GLGetTexParameterxv,
+ GLGetTextureImage,
+ GLGetTextureLevelParameterfv,
+ GLGetTextureLevelParameteriv,
+ GLGetTextureParameterIiv,
+ GLGetTextureParameterIuiv,
+ GLGetTextureParameterfv,
+ GLGetTextureParameteriv,
+ GLGetTextureSubImage,
+ GLGetTransformFeedbackVarying,
+ GLGetTransformFeedbacki64_v,
+ GLGetTransformFeedbacki_v,
+ GLGetTransformFeedbackiv,
+ GLGetTranslatedShaderSourceANGLE,
+ GLGetUniformBlockIndex,
+ GLGetUniformIndices,
+ GLGetUniformLocation,
+ GLGetUniformSubroutineuiv,
+ GLGetUniformdv,
+ GLGetUniformfv,
+ GLGetUniformfvRobustANGLE,
+ GLGetUniformiv,
+ GLGetUniformivRobustANGLE,
+ GLGetUniformuiv,
+ GLGetUniformuivRobustANGLE,
+ GLGetUnsignedBytei_vEXT,
+ GLGetUnsignedBytevEXT,
+ GLGetVertexArrayIndexed64iv,
+ GLGetVertexArrayIndexediv,
+ GLGetVertexArrayiv,
+ GLGetVertexAttribIiv,
+ GLGetVertexAttribIivRobustANGLE,
+ GLGetVertexAttribIuiv,
+ GLGetVertexAttribIuivRobustANGLE,
+ GLGetVertexAttribLdv,
+ GLGetVertexAttribPointerv,
+ GLGetVertexAttribPointervRobustANGLE,
+ GLGetVertexAttribdv,
+ GLGetVertexAttribfv,
+ GLGetVertexAttribfvRobustANGLE,
+ GLGetVertexAttribiv,
+ GLGetVertexAttribivRobustANGLE,
+ GLGetnColorTable,
+ GLGetnCompressedTexImage,
+ GLGetnConvolutionFilter,
+ GLGetnHistogram,
+ GLGetnMapdv,
+ GLGetnMapfv,
+ GLGetnMapiv,
+ GLGetnMinmax,
+ GLGetnPixelMapfv,
+ GLGetnPixelMapuiv,
+ GLGetnPixelMapusv,
+ GLGetnPolygonStipple,
+ GLGetnSeparableFilter,
+ GLGetnTexImage,
+ GLGetnUniformdv,
+ GLGetnUniformfv,
+ GLGetnUniformfvEXT,
+ GLGetnUniformfvRobustANGLE,
+ GLGetnUniformiv,
+ GLGetnUniformivEXT,
+ GLGetnUniformivRobustANGLE,
+ GLGetnUniformuiv,
+ GLGetnUniformuivRobustANGLE,
+ GLHint,
+ GLImportMemoryFdEXT,
+ GLImportMemoryZirconHandleANGLE,
+ GLImportSemaphoreFdEXT,
+ GLImportSemaphoreZirconHandleANGLE,
+ GLIndexMask,
+ GLIndexPointer,
+ GLIndexd,
+ GLIndexdv,
+ GLIndexf,
+ GLIndexfv,
+ GLIndexi,
+ GLIndexiv,
+ GLIndexs,
+ GLIndexsv,
+ GLIndexub,
+ GLIndexubv,
+ GLInitNames,
+ GLInsertEventMarkerEXT,
+ GLInterleavedArrays,
+ GLInvalid,
+ GLInvalidateBufferData,
+ GLInvalidateBufferSubData,
+ GLInvalidateFramebuffer,
+ GLInvalidateNamedFramebufferData,
+ GLInvalidateNamedFramebufferSubData,
+ GLInvalidateSubFramebuffer,
+ GLInvalidateTexImage,
+ GLInvalidateTexSubImage,
+ GLInvalidateTextureANGLE,
+ GLIsBuffer,
+ GLIsEnabled,
+ GLIsEnabledi,
+ GLIsEnablediEXT,
+ GLIsEnablediOES,
+ GLIsFenceNV,
+ GLIsFramebuffer,
+ GLIsFramebufferOES,
+ GLIsList,
+ GLIsMemoryObjectEXT,
+ GLIsProgram,
+ GLIsProgramPipeline,
+ GLIsProgramPipelineEXT,
+ GLIsQuery,
+ GLIsQueryEXT,
+ GLIsRenderbuffer,
+ GLIsRenderbufferOES,
+ GLIsSampler,
+ GLIsSemaphoreEXT,
+ GLIsShader,
+ GLIsSync,
+ GLIsTexture,
+ GLIsTransformFeedback,
+ GLIsVertexArray,
+ GLIsVertexArrayOES,
+ GLLabelObjectEXT,
+ GLLightModelf,
+ GLLightModelfv,
+ GLLightModeli,
+ GLLightModeliv,
+ GLLightModelx,
+ GLLightModelxv,
+ GLLightf,
+ GLLightfv,
+ GLLighti,
+ GLLightiv,
+ GLLightx,
+ GLLightxv,
+ GLLineStipple,
+ GLLineWidth,
+ GLLineWidthx,
+ GLLinkProgram,
+ GLListBase,
+ GLLoadIdentity,
+ GLLoadMatrixd,
+ GLLoadMatrixf,
+ GLLoadMatrixx,
+ GLLoadName,
+ GLLoadPaletteFromModelViewMatrixOES,
+ GLLoadTransposeMatrixd,
+ GLLoadTransposeMatrixf,
+ GLLogicOp,
+ GLLogicOpANGLE,
+ GLLoseContextCHROMIUM,
+ GLMap1d,
+ GLMap1f,
+ GLMap2d,
+ GLMap2f,
+ GLMapBuffer,
+ GLMapBufferOES,
+ GLMapBufferRange,
+ GLMapBufferRangeEXT,
+ GLMapGrid1d,
+ GLMapGrid1f,
+ GLMapGrid2d,
+ GLMapGrid2f,
+ GLMapNamedBuffer,
+ GLMapNamedBufferRange,
+ GLMaterialf,
+ GLMaterialfv,
+ GLMateriali,
+ GLMaterialiv,
+ GLMaterialx,
+ GLMaterialxv,
+ GLMatrixIndexPointerOES,
+ GLMatrixMode,
+ GLMaxShaderCompilerThreadsKHR,
+ GLMemoryBarrier,
+ GLMemoryBarrierByRegion,
+ GLMemoryObjectParameterivEXT,
+ GLMinSampleShading,
+ GLMinSampleShadingOES,
+ GLMultMatrixd,
+ GLMultMatrixf,
+ GLMultMatrixx,
+ GLMultTransposeMatrixd,
+ GLMultTransposeMatrixf,
+ GLMultiDrawArrays,
+ GLMultiDrawArraysANGLE,
+ GLMultiDrawArraysIndirect,
+ GLMultiDrawArraysIndirectCount,
+ GLMultiDrawArraysIndirectEXT,
+ GLMultiDrawArraysInstancedANGLE,
+ GLMultiDrawArraysInstancedBaseInstanceANGLE,
+ GLMultiDrawElements,
+ GLMultiDrawElementsANGLE,
+ GLMultiDrawElementsBaseVertex,
+ GLMultiDrawElementsBaseVertexEXT,
+ GLMultiDrawElementsIndirect,
+ GLMultiDrawElementsIndirectCount,
+ GLMultiDrawElementsIndirectEXT,
+ GLMultiDrawElementsInstancedANGLE,
+ GLMultiDrawElementsInstancedBaseVertexBaseInstanceANGLE,
+ GLMultiTexCoord1d,
+ GLMultiTexCoord1dv,
+ GLMultiTexCoord1f,
+ GLMultiTexCoord1fv,
+ GLMultiTexCoord1i,
+ GLMultiTexCoord1iv,
+ GLMultiTexCoord1s,
+ GLMultiTexCoord1sv,
+ GLMultiTexCoord2d,
+ GLMultiTexCoord2dv,
+ GLMultiTexCoord2f,
+ GLMultiTexCoord2fv,
+ GLMultiTexCoord2i,
+ GLMultiTexCoord2iv,
+ GLMultiTexCoord2s,
+ GLMultiTexCoord2sv,
+ GLMultiTexCoord3d,
+ GLMultiTexCoord3dv,
+ GLMultiTexCoord3f,
+ GLMultiTexCoord3fv,
+ GLMultiTexCoord3i,
+ GLMultiTexCoord3iv,
+ GLMultiTexCoord3s,
+ GLMultiTexCoord3sv,
+ GLMultiTexCoord4d,
+ GLMultiTexCoord4dv,
+ GLMultiTexCoord4f,
+ GLMultiTexCoord4fv,
+ GLMultiTexCoord4i,
+ GLMultiTexCoord4iv,
+ GLMultiTexCoord4s,
+ GLMultiTexCoord4sv,
+ GLMultiTexCoord4x,
+ GLMultiTexCoordP1ui,
+ GLMultiTexCoordP1uiv,
+ GLMultiTexCoordP2ui,
+ GLMultiTexCoordP2uiv,
+ GLMultiTexCoordP3ui,
+ GLMultiTexCoordP3uiv,
+ GLMultiTexCoordP4ui,
+ GLMultiTexCoordP4uiv,
+ GLNamedBufferData,
+ GLNamedBufferStorage,
+ GLNamedBufferStorageExternalEXT,
+ GLNamedBufferSubData,
+ GLNamedFramebufferDrawBuffer,
+ GLNamedFramebufferDrawBuffers,
+ GLNamedFramebufferParameteri,
+ GLNamedFramebufferReadBuffer,
+ GLNamedFramebufferRenderbuffer,
+ GLNamedFramebufferTexture,
+ GLNamedFramebufferTextureLayer,
+ GLNamedRenderbufferStorage,
+ GLNamedRenderbufferStorageMultisample,
+ GLNewList,
+ GLNormal3b,
+ GLNormal3bv,
+ GLNormal3d,
+ GLNormal3dv,
+ GLNormal3f,
+ GLNormal3fv,
+ GLNormal3i,
+ GLNormal3iv,
+ GLNormal3s,
+ GLNormal3sv,
+ GLNormal3x,
+ GLNormalP3ui,
+ GLNormalP3uiv,
+ GLNormalPointer,
+ GLObjectLabel,
+ GLObjectLabelKHR,
+ GLObjectPtrLabel,
+ GLObjectPtrLabelKHR,
+ GLOrtho,
+ GLOrthof,
+ GLOrthox,
+ GLPassThrough,
+ GLPatchParameterfv,
+ GLPatchParameteri,
+ GLPatchParameteriEXT,
+ GLPauseTransformFeedback,
+ GLPixelLocalStorageBarrierANGLE,
+ GLPixelMapfv,
+ GLPixelMapuiv,
+ GLPixelMapusv,
+ GLPixelStoref,
+ GLPixelStorei,
+ GLPixelTransferf,
+ GLPixelTransferi,
+ GLPixelZoom,
+ GLPointParameterf,
+ GLPointParameterfv,
+ GLPointParameteri,
+ GLPointParameteriv,
+ GLPointParameterx,
+ GLPointParameterxv,
+ GLPointSize,
+ GLPointSizePointerOES,
+ GLPointSizex,
+ GLPolygonMode,
+ GLPolygonOffset,
+ GLPolygonOffsetClamp,
+ GLPolygonOffsetx,
+ GLPolygonStipple,
+ GLPopAttrib,
+ GLPopClientAttrib,
+ GLPopDebugGroup,
+ GLPopDebugGroupKHR,
+ GLPopGroupMarkerEXT,
+ GLPopMatrix,
+ GLPopName,
+ GLPrimitiveBoundingBox,
+ GLPrimitiveBoundingBoxEXT,
+ GLPrimitiveBoundingBoxOES,
+ GLPrimitiveRestartIndex,
+ GLPrioritizeTextures,
+ GLProgramBinary,
+ GLProgramBinaryOES,
+ GLProgramParameteri,
+ GLProgramParameteriEXT,
+ GLProgramUniform1d,
+ GLProgramUniform1dv,
+ GLProgramUniform1f,
+ GLProgramUniform1fEXT,
+ GLProgramUniform1fv,
+ GLProgramUniform1fvEXT,
+ GLProgramUniform1i,
+ GLProgramUniform1iEXT,
+ GLProgramUniform1iv,
+ GLProgramUniform1ivEXT,
+ GLProgramUniform1ui,
+ GLProgramUniform1uiEXT,
+ GLProgramUniform1uiv,
+ GLProgramUniform1uivEXT,
+ GLProgramUniform2d,
+ GLProgramUniform2dv,
+ GLProgramUniform2f,
+ GLProgramUniform2fEXT,
+ GLProgramUniform2fv,
+ GLProgramUniform2fvEXT,
+ GLProgramUniform2i,
+ GLProgramUniform2iEXT,
+ GLProgramUniform2iv,
+ GLProgramUniform2ivEXT,
+ GLProgramUniform2ui,
+ GLProgramUniform2uiEXT,
+ GLProgramUniform2uiv,
+ GLProgramUniform2uivEXT,
+ GLProgramUniform3d,
+ GLProgramUniform3dv,
+ GLProgramUniform3f,
+ GLProgramUniform3fEXT,
+ GLProgramUniform3fv,
+ GLProgramUniform3fvEXT,
+ GLProgramUniform3i,
+ GLProgramUniform3iEXT,
+ GLProgramUniform3iv,
+ GLProgramUniform3ivEXT,
+ GLProgramUniform3ui,
+ GLProgramUniform3uiEXT,
+ GLProgramUniform3uiv,
+ GLProgramUniform3uivEXT,
+ GLProgramUniform4d,
+ GLProgramUniform4dv,
+ GLProgramUniform4f,
+ GLProgramUniform4fEXT,
+ GLProgramUniform4fv,
+ GLProgramUniform4fvEXT,
+ GLProgramUniform4i,
+ GLProgramUniform4iEXT,
+ GLProgramUniform4iv,
+ GLProgramUniform4ivEXT,
+ GLProgramUniform4ui,
+ GLProgramUniform4uiEXT,
+ GLProgramUniform4uiv,
+ GLProgramUniform4uivEXT,
+ GLProgramUniformMatrix2dv,
+ GLProgramUniformMatrix2fv,
+ GLProgramUniformMatrix2fvEXT,
+ GLProgramUniformMatrix2x3dv,
+ GLProgramUniformMatrix2x3fv,
+ GLProgramUniformMatrix2x3fvEXT,
+ GLProgramUniformMatrix2x4dv,
+ GLProgramUniformMatrix2x4fv,
+ GLProgramUniformMatrix2x4fvEXT,
+ GLProgramUniformMatrix3dv,
+ GLProgramUniformMatrix3fv,
+ GLProgramUniformMatrix3fvEXT,
+ GLProgramUniformMatrix3x2dv,
+ GLProgramUniformMatrix3x2fv,
+ GLProgramUniformMatrix3x2fvEXT,
+ GLProgramUniformMatrix3x4dv,
+ GLProgramUniformMatrix3x4fv,
+ GLProgramUniformMatrix3x4fvEXT,
+ GLProgramUniformMatrix4dv,
+ GLProgramUniformMatrix4fv,
+ GLProgramUniformMatrix4fvEXT,
+ GLProgramUniformMatrix4x2dv,
+ GLProgramUniformMatrix4x2fv,
+ GLProgramUniformMatrix4x2fvEXT,
+ GLProgramUniformMatrix4x3dv,
+ GLProgramUniformMatrix4x3fv,
+ GLProgramUniformMatrix4x3fvEXT,
+ GLProvokingVertex,
+ GLProvokingVertexANGLE,
+ GLPushAttrib,
+ GLPushClientAttrib,
+ GLPushDebugGroup,
+ GLPushDebugGroupKHR,
+ GLPushGroupMarkerEXT,
+ GLPushMatrix,
+ GLPushName,
+ GLQueryCounter,
+ GLQueryCounterEXT,
+ GLQueryMatrixxOES,
+ GLRasterPos2d,
+ GLRasterPos2dv,
+ GLRasterPos2f,
+ GLRasterPos2fv,
+ GLRasterPos2i,
+ GLRasterPos2iv,
+ GLRasterPos2s,
+ GLRasterPos2sv,
+ GLRasterPos3d,
+ GLRasterPos3dv,
+ GLRasterPos3f,
+ GLRasterPos3fv,
+ GLRasterPos3i,
+ GLRasterPos3iv,
+ GLRasterPos3s,
+ GLRasterPos3sv,
+ GLRasterPos4d,
+ GLRasterPos4dv,
+ GLRasterPos4f,
+ GLRasterPos4fv,
+ GLRasterPos4i,
+ GLRasterPos4iv,
+ GLRasterPos4s,
+ GLRasterPos4sv,
+ GLReadBuffer,
+ GLReadPixels,
+ GLReadPixelsRobustANGLE,
+ GLReadnPixels,
+ GLReadnPixelsEXT,
+ GLReadnPixelsRobustANGLE,
+ GLRectd,
+ GLRectdv,
+ GLRectf,
+ GLRectfv,
+ GLRecti,
+ GLRectiv,
+ GLRects,
+ GLRectsv,
+ GLReleaseShaderCompiler,
+ GLReleaseTexturesANGLE,
+ GLRenderMode,
+ GLRenderbufferStorage,
+ GLRenderbufferStorageMultisample,
+ GLRenderbufferStorageMultisampleANGLE,
+ GLRenderbufferStorageMultisampleEXT,
+ GLRenderbufferStorageOES,
+ GLRequestExtensionANGLE,
+ GLResumeTransformFeedback,
+ GLRotated,
+ GLRotatef,
+ GLRotatex,
+ GLSampleCoverage,
+ GLSampleCoveragex,
+ GLSampleMaski,
+ GLSampleMaskiANGLE,
+ GLSamplerParameterIiv,
+ GLSamplerParameterIivEXT,
+ GLSamplerParameterIivOES,
+ GLSamplerParameterIivRobustANGLE,
+ GLSamplerParameterIuiv,
+ GLSamplerParameterIuivEXT,
+ GLSamplerParameterIuivOES,
+ GLSamplerParameterIuivRobustANGLE,
+ GLSamplerParameterf,
+ GLSamplerParameterfv,
+ GLSamplerParameterfvRobustANGLE,
+ GLSamplerParameteri,
+ GLSamplerParameteriv,
+ GLSamplerParameterivRobustANGLE,
+ GLScaled,
+ GLScalef,
+ GLScalex,
+ GLScissor,
+ GLScissorArrayv,
+ GLScissorIndexed,
+ GLScissorIndexedv,
+ GLSecondaryColor3b,
+ GLSecondaryColor3bv,
+ GLSecondaryColor3d,
+ GLSecondaryColor3dv,
+ GLSecondaryColor3f,
+ GLSecondaryColor3fv,
+ GLSecondaryColor3i,
+ GLSecondaryColor3iv,
+ GLSecondaryColor3s,
+ GLSecondaryColor3sv,
+ GLSecondaryColor3ub,
+ GLSecondaryColor3ubv,
+ GLSecondaryColor3ui,
+ GLSecondaryColor3uiv,
+ GLSecondaryColor3us,
+ GLSecondaryColor3usv,
+ GLSecondaryColorP3ui,
+ GLSecondaryColorP3uiv,
+ GLSecondaryColorPointer,
+ GLSelectBuffer,
+ GLSelectPerfMonitorCountersAMD,
+ GLSemaphoreParameterui64vEXT,
+ GLSetFenceNV,
+ GLShadeModel,
+ GLShaderBinary,
+ GLShaderSource,
+ GLShaderStorageBlockBinding,
+ GLShadingRateQCOM,
+ GLSignalSemaphoreEXT,
+ GLSpecializeShader,
+ GLStencilFunc,
+ GLStencilFuncSeparate,
+ GLStencilMask,
+ GLStencilMaskSeparate,
+ GLStencilOp,
+ GLStencilOpSeparate,
+ GLTestFenceNV,
+ GLTexBuffer,
+ GLTexBufferEXT,
+ GLTexBufferOES,
+ GLTexBufferRange,
+ GLTexBufferRangeEXT,
+ GLTexBufferRangeOES,
+ GLTexCoord1d,
+ GLTexCoord1dv,
+ GLTexCoord1f,
+ GLTexCoord1fv,
+ GLTexCoord1i,
+ GLTexCoord1iv,
+ GLTexCoord1s,
+ GLTexCoord1sv,
+ GLTexCoord2d,
+ GLTexCoord2dv,
+ GLTexCoord2f,
+ GLTexCoord2fv,
+ GLTexCoord2i,
+ GLTexCoord2iv,
+ GLTexCoord2s,
+ GLTexCoord2sv,
+ GLTexCoord3d,
+ GLTexCoord3dv,
+ GLTexCoord3f,
+ GLTexCoord3fv,
+ GLTexCoord3i,
+ GLTexCoord3iv,
+ GLTexCoord3s,
+ GLTexCoord3sv,
+ GLTexCoord4d,
+ GLTexCoord4dv,
+ GLTexCoord4f,
+ GLTexCoord4fv,
+ GLTexCoord4i,
+ GLTexCoord4iv,
+ GLTexCoord4s,
+ GLTexCoord4sv,
+ GLTexCoordP1ui,
+ GLTexCoordP1uiv,
+ GLTexCoordP2ui,
+ GLTexCoordP2uiv,
+ GLTexCoordP3ui,
+ GLTexCoordP3uiv,
+ GLTexCoordP4ui,
+ GLTexCoordP4uiv,
+ GLTexCoordPointer,
+ GLTexEnvf,
+ GLTexEnvfv,
+ GLTexEnvi,
+ GLTexEnviv,
+ GLTexEnvx,
+ GLTexEnvxv,
+ GLTexGend,
+ GLTexGendv,
+ GLTexGenf,
+ GLTexGenfOES,
+ GLTexGenfv,
+ GLTexGenfvOES,
+ GLTexGeni,
+ GLTexGeniOES,
+ GLTexGeniv,
+ GLTexGenivOES,
+ GLTexGenxOES,
+ GLTexGenxvOES,
+ GLTexImage1D,
+ GLTexImage2D,
+ GLTexImage2DExternalANGLE,
+ GLTexImage2DMultisample,
+ GLTexImage2DRobustANGLE,
+ GLTexImage3D,
+ GLTexImage3DMultisample,
+ GLTexImage3DOES,
+ GLTexImage3DRobustANGLE,
+ GLTexParameterIiv,
+ GLTexParameterIivEXT,
+ GLTexParameterIivOES,
+ GLTexParameterIivRobustANGLE,
+ GLTexParameterIuiv,
+ GLTexParameterIuivEXT,
+ GLTexParameterIuivOES,
+ GLTexParameterIuivRobustANGLE,
+ GLTexParameterf,
+ GLTexParameterfv,
+ GLTexParameterfvRobustANGLE,
+ GLTexParameteri,
+ GLTexParameteriv,
+ GLTexParameterivRobustANGLE,
+ GLTexParameterx,
+ GLTexParameterxv,
+ GLTexStorage1D,
+ GLTexStorage1DEXT,
+ GLTexStorage2D,
+ GLTexStorage2DEXT,
+ GLTexStorage2DMultisample,
+ GLTexStorage2DMultisampleANGLE,
+ GLTexStorage3D,
+ GLTexStorage3DEXT,
+ GLTexStorage3DMultisample,
+ GLTexStorage3DMultisampleOES,
+ GLTexStorageMem2DEXT,
+ GLTexStorageMem2DMultisampleEXT,
+ GLTexStorageMem3DEXT,
+ GLTexStorageMem3DMultisampleEXT,
+ GLTexStorageMemFlags2DANGLE,
+ GLTexStorageMemFlags2DMultisampleANGLE,
+ GLTexStorageMemFlags3DANGLE,
+ GLTexStorageMemFlags3DMultisampleANGLE,
+ GLTexSubImage1D,
+ GLTexSubImage2D,
+ GLTexSubImage2DRobustANGLE,
+ GLTexSubImage3D,
+ GLTexSubImage3DOES,
+ GLTexSubImage3DRobustANGLE,
+ GLTextureBarrier,
+ GLTextureBuffer,
+ GLTextureBufferRange,
+ GLTextureParameterIiv,
+ GLTextureParameterIuiv,
+ GLTextureParameterf,
+ GLTextureParameterfv,
+ GLTextureParameteri,
+ GLTextureParameteriv,
+ GLTextureStorage1D,
+ GLTextureStorage2D,
+ GLTextureStorage2DMultisample,
+ GLTextureStorage3D,
+ GLTextureStorage3DMultisample,
+ GLTextureSubImage1D,
+ GLTextureSubImage2D,
+ GLTextureSubImage3D,
+ GLTextureView,
+ GLTransformFeedbackBufferBase,
+ GLTransformFeedbackBufferRange,
+ GLTransformFeedbackVaryings,
+ GLTranslated,
+ GLTranslatef,
+ GLTranslatex,
+ GLUniform1d,
+ GLUniform1dv,
+ GLUniform1f,
+ GLUniform1fv,
+ GLUniform1i,
+ GLUniform1iv,
+ GLUniform1ui,
+ GLUniform1uiv,
+ GLUniform2d,
+ GLUniform2dv,
+ GLUniform2f,
+ GLUniform2fv,
+ GLUniform2i,
+ GLUniform2iv,
+ GLUniform2ui,
+ GLUniform2uiv,
+ GLUniform3d,
+ GLUniform3dv,
+ GLUniform3f,
+ GLUniform3fv,
+ GLUniform3i,
+ GLUniform3iv,
+ GLUniform3ui,
+ GLUniform3uiv,
+ GLUniform4d,
+ GLUniform4dv,
+ GLUniform4f,
+ GLUniform4fv,
+ GLUniform4i,
+ GLUniform4iv,
+ GLUniform4ui,
+ GLUniform4uiv,
+ GLUniformBlockBinding,
+ GLUniformMatrix2dv,
+ GLUniformMatrix2fv,
+ GLUniformMatrix2x3dv,
+ GLUniformMatrix2x3fv,
+ GLUniformMatrix2x4dv,
+ GLUniformMatrix2x4fv,
+ GLUniformMatrix3dv,
+ GLUniformMatrix3fv,
+ GLUniformMatrix3x2dv,
+ GLUniformMatrix3x2fv,
+ GLUniformMatrix3x4dv,
+ GLUniformMatrix3x4fv,
+ GLUniformMatrix4dv,
+ GLUniformMatrix4fv,
+ GLUniformMatrix4x2dv,
+ GLUniformMatrix4x2fv,
+ GLUniformMatrix4x3dv,
+ GLUniformMatrix4x3fv,
+ GLUniformSubroutinesuiv,
+ GLUnmapBuffer,
+ GLUnmapBufferOES,
+ GLUnmapNamedBuffer,
+ GLUseProgram,
+ GLUseProgramStages,
+ GLUseProgramStagesEXT,
+ GLValidateProgram,
+ GLValidateProgramPipeline,
+ GLValidateProgramPipelineEXT,
+ GLVertex2d,
+ GLVertex2dv,
+ GLVertex2f,
+ GLVertex2fv,
+ GLVertex2i,
+ GLVertex2iv,
+ GLVertex2s,
+ GLVertex2sv,
+ GLVertex3d,
+ GLVertex3dv,
+ GLVertex3f,
+ GLVertex3fv,
+ GLVertex3i,
+ GLVertex3iv,
+ GLVertex3s,
+ GLVertex3sv,
+ GLVertex4d,
+ GLVertex4dv,
+ GLVertex4f,
+ GLVertex4fv,
+ GLVertex4i,
+ GLVertex4iv,
+ GLVertex4s,
+ GLVertex4sv,
+ GLVertexArrayAttribBinding,
+ GLVertexArrayAttribFormat,
+ GLVertexArrayAttribIFormat,
+ GLVertexArrayAttribLFormat,
+ GLVertexArrayBindingDivisor,
+ GLVertexArrayElementBuffer,
+ GLVertexArrayVertexBuffer,
+ GLVertexArrayVertexBuffers,
+ GLVertexAttrib1d,
+ GLVertexAttrib1dv,
+ GLVertexAttrib1f,
+ GLVertexAttrib1fv,
+ GLVertexAttrib1s,
+ GLVertexAttrib1sv,
+ GLVertexAttrib2d,
+ GLVertexAttrib2dv,
+ GLVertexAttrib2f,
+ GLVertexAttrib2fv,
+ GLVertexAttrib2s,
+ GLVertexAttrib2sv,
+ GLVertexAttrib3d,
+ GLVertexAttrib3dv,
+ GLVertexAttrib3f,
+ GLVertexAttrib3fv,
+ GLVertexAttrib3s,
+ GLVertexAttrib3sv,
+ GLVertexAttrib4Nbv,
+ GLVertexAttrib4Niv,
+ GLVertexAttrib4Nsv,
+ GLVertexAttrib4Nub,
+ GLVertexAttrib4Nubv,
+ GLVertexAttrib4Nuiv,
+ GLVertexAttrib4Nusv,
+ GLVertexAttrib4bv,
+ GLVertexAttrib4d,
+ GLVertexAttrib4dv,
+ GLVertexAttrib4f,
+ GLVertexAttrib4fv,
+ GLVertexAttrib4iv,
+ GLVertexAttrib4s,
+ GLVertexAttrib4sv,
+ GLVertexAttrib4ubv,
+ GLVertexAttrib4uiv,
+ GLVertexAttrib4usv,
+ GLVertexAttribBinding,
+ GLVertexAttribDivisor,
+ GLVertexAttribDivisorANGLE,
+ GLVertexAttribDivisorEXT,
+ GLVertexAttribFormat,
+ GLVertexAttribI1i,
+ GLVertexAttribI1iv,
+ GLVertexAttribI1ui,
+ GLVertexAttribI1uiv,
+ GLVertexAttribI2i,
+ GLVertexAttribI2iv,
+ GLVertexAttribI2ui,
+ GLVertexAttribI2uiv,
+ GLVertexAttribI3i,
+ GLVertexAttribI3iv,
+ GLVertexAttribI3ui,
+ GLVertexAttribI3uiv,
+ GLVertexAttribI4bv,
+ GLVertexAttribI4i,
+ GLVertexAttribI4iv,
+ GLVertexAttribI4sv,
+ GLVertexAttribI4ubv,
+ GLVertexAttribI4ui,
+ GLVertexAttribI4uiv,
+ GLVertexAttribI4usv,
+ GLVertexAttribIFormat,
+ GLVertexAttribIPointer,
+ GLVertexAttribL1d,
+ GLVertexAttribL1dv,
+ GLVertexAttribL2d,
+ GLVertexAttribL2dv,
+ GLVertexAttribL3d,
+ GLVertexAttribL3dv,
+ GLVertexAttribL4d,
+ GLVertexAttribL4dv,
+ GLVertexAttribLFormat,
+ GLVertexAttribLPointer,
+ GLVertexAttribP1ui,
+ GLVertexAttribP1uiv,
+ GLVertexAttribP2ui,
+ GLVertexAttribP2uiv,
+ GLVertexAttribP3ui,
+ GLVertexAttribP3uiv,
+ GLVertexAttribP4ui,
+ GLVertexAttribP4uiv,
+ GLVertexAttribPointer,
+ GLVertexBindingDivisor,
+ GLVertexP2ui,
+ GLVertexP2uiv,
+ GLVertexP3ui,
+ GLVertexP3uiv,
+ GLVertexP4ui,
+ GLVertexP4uiv,
+ GLVertexPointer,
+ GLViewport,
+ GLViewportArrayv,
+ GLViewportIndexedf,
+ GLViewportIndexedfv,
+ GLWaitSemaphoreEXT,
+ GLWaitSync,
+ GLWeightPointerOES,
+ GLWindowPos2d,
+ GLWindowPos2dv,
+ GLWindowPos2f,
+ GLWindowPos2fv,
+ GLWindowPos2i,
+ GLWindowPos2iv,
+ GLWindowPos2s,
+ GLWindowPos2sv,
+ GLWindowPos3d,
+ GLWindowPos3dv,
+ GLWindowPos3f,
+ GLWindowPos3fv,
+ GLWindowPos3i,
+ GLWindowPos3iv,
+ GLWindowPos3s,
+ GLWindowPos3sv,
+ WGLChoosePixelFormat,
+ WGLCopyContext,
+ WGLCreateContext,
+ WGLCreateLayerContext,
+ WGLDeleteContext,
+ WGLDescribeLayerPlane,
+ WGLDescribePixelFormat,
+ WGLGetCurrentContext,
+ WGLGetCurrentDC,
+ WGLGetEnhMetaFilePixelFormat,
+ WGLGetLayerPaletteEntries,
+ WGLGetPixelFormat,
+ WGLGetProcAddress,
+ WGLMakeCurrent,
+ WGLRealizeLayerPalette,
+ WGLSetLayerPaletteEntries,
+ WGLSetPixelFormat,
+ WGLShareLists,
+ WGLSwapBuffers,
+ WGLSwapLayerBuffers,
+ WGLUseFontBitmaps,
+ WGLUseFontBitmapsA,
+ WGLUseFontBitmapsW,
+ WGLUseFontOutlines,
+ WGLUseFontOutlinesA,
+ WGLUseFontOutlinesW
+};
+
+const char *GetEntryPointName(EntryPoint ep);
+} // namespace angle
+#endif // COMMON_ENTRYPOINTSENUM_AUTOGEN_H_
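
Because every GL, GLES, EGL, WGL and CL command has a single enumerator in EntryPoint, code layered on top of ANGLE can branch on the enum instead of comparing command-name strings. A small hypothetical filter as an illustration (the draw-call subset chosen here is an assumption, not something this header defines):

    #include "common/entry_points_enum_autogen.h"

    // Hypothetical predicate: true for a handful of draw entry points that a
    // frame-capture tool might want to intercept.
    bool IsBasicDrawCall(angle::EntryPoint ep)
    {
        switch (ep)
        {
            case angle::EntryPoint::GLDrawArrays:
            case angle::EntryPoint::GLDrawElements:
            case angle::EntryPoint::GLDrawArraysInstanced:
            case angle::EntryPoint::GLDrawElementsInstanced:
                return true;
            default:
                return false;
        }
    }
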
diff --git a/gfx/angle/checkout/src/common/event_tracer.cpp b/gfx/angle/checkout/src/common/event_tracer.cpp
new file mode 100644
index 0000000000..151cb2cd70
--- /dev/null
+++ b/gfx/angle/checkout/src/common/event_tracer.cpp
@@ -0,0 +1,53 @@
+// Copyright 2012 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "common/event_tracer.h"
+
+#include "common/debug.h"
+
+namespace angle
+{
+
+const unsigned char *GetTraceCategoryEnabledFlag(PlatformMethods *platform, const char *name)
+{
+ ASSERT(platform);
+
+ const unsigned char *categoryEnabledFlag =
+ platform->getTraceCategoryEnabledFlag(platform, name);
+ if (categoryEnabledFlag != nullptr)
+ {
+ return categoryEnabledFlag;
+ }
+
+ static unsigned char disabled = 0;
+ return &disabled;
+}
+
+angle::TraceEventHandle AddTraceEvent(PlatformMethods *platform,
+ char phase,
+ const unsigned char *categoryGroupEnabled,
+ const char *name,
+ unsigned long long id,
+ int numArgs,
+ const char **argNames,
+ const unsigned char *argTypes,
+ const unsigned long long *argValues,
+ unsigned char flags)
+{
+ ASSERT(platform);
+
+ double timestamp = platform->monotonicallyIncreasingTime(platform);
+
+ if (timestamp != 0)
+ {
+ angle::TraceEventHandle handle =
+ platform->addTraceEvent(platform, phase, categoryGroupEnabled, name, id, timestamp,
+ numArgs, argNames, argTypes, argValues, flags);
+ return handle;
+ }
+
+ return static_cast<angle::TraceEventHandle>(0);
+}
+
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/event_tracer.h b/gfx/angle/checkout/src/common/event_tracer.h
new file mode 100644
index 0000000000..128d88b9e0
--- /dev/null
+++ b/gfx/angle/checkout/src/common/event_tracer.h
@@ -0,0 +1,26 @@
+// Copyright 2012 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMMON_EVENT_TRACER_H_
+#define COMMON_EVENT_TRACER_H_
+
+#include "common/platform.h"
+#include "platform/PlatformMethods.h"
+
+namespace angle
+{
+const unsigned char *GetTraceCategoryEnabledFlag(PlatformMethods *platform, const char *name);
+angle::TraceEventHandle AddTraceEvent(PlatformMethods *platform,
+ char phase,
+ const unsigned char *categoryGroupEnabled,
+ const char *name,
+ unsigned long long id,
+ int numArgs,
+ const char **argNames,
+ const unsigned char *argTypes,
+ const unsigned long long *argValues,
+ unsigned char flags);
+} // namespace angle
+
+#endif // COMMON_EVENT_TRACER_H_
diff --git a/gfx/angle/checkout/src/common/hash_utils.h b/gfx/angle/checkout/src/common/hash_utils.h
new file mode 100644
index 0000000000..aec4e7f77c
--- /dev/null
+++ b/gfx/angle/checkout/src/common/hash_utils.h
@@ -0,0 +1,39 @@
+//
+// Copyright 2018 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// hash_utils.h: Hashing based helper functions.
+
+#ifndef COMMON_HASHUTILS_H_
+#define COMMON_HASHUTILS_H_
+
+#include "common/debug.h"
+#include "common/third_party/xxhash/xxhash.h"
+
+namespace angle
+{
+// Computes a hash of "key". Any data passed to this function must be multiples of
+// 4 bytes, since the PMurHash32 method can only operate increments of 4-byte words.
+inline std::size_t ComputeGenericHash(const void *key, size_t keySize)
+{
+ static constexpr unsigned int kSeed = 0xABCDEF98;
+
+ // We can't support "odd" alignments. ComputeGenericHash requires aligned types
+ ASSERT(keySize % 4 == 0);
+#if defined(ANGLE_IS_64_BIT_CPU)
+ return XXH64(key, keySize, kSeed);
+#else
+ return XXH32(key, keySize, kSeed);
+#endif // defined(ANGLE_IS_64_BIT_CPU)
+}
+
+template <typename T>
+std::size_t ComputeGenericHash(const T &key)
+{
+ static_assert(sizeof(key) % 4 == 0, "ComputeGenericHash requires aligned types");
+ return ComputeGenericHash(&key, sizeof(key));
+}
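+
+// Usage sketch (illustrative only, not part of the upstream sources). The TileKey
+// struct below is hypothetical; it merely shows a key whose size is a 4-byte multiple:
+//
+//   struct TileKey { uint32_t level; uint32_t index; };  // sizeof == 8
+//   std::size_t h = angle::ComputeGenericHash(TileKey{2u, 7u});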
+} // namespace angle
+
+#endif // COMMON_HASHUTILS_H_
diff --git a/gfx/angle/checkout/src/common/mathutil.cpp b/gfx/angle/checkout/src/common/mathutil.cpp
new file mode 100644
index 0000000000..5cbc6a920a
--- /dev/null
+++ b/gfx/angle/checkout/src/common/mathutil.cpp
@@ -0,0 +1,83 @@
+//
+// Copyright 2013 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// mathutil.cpp: Math and bit manipulation functions.
+
+#include "common/mathutil.h"
+
+#include <math.h>
+#include <algorithm>
+
+namespace gl
+{
+
+namespace
+{
+
+struct RGB9E5Data
+{
+ unsigned int R : 9;
+ unsigned int G : 9;
+ unsigned int B : 9;
+ unsigned int E : 5;
+};
+
+// B is the exponent bias (15)
+constexpr int g_sharedexp_bias = 15;
+
+// N is the number of mantissa bits per component (9)
+constexpr int g_sharedexp_mantissabits = 9;
+
+// number of mantissa bits per component pre-biased
+constexpr int g_sharedexp_biased_mantissabits = g_sharedexp_bias + g_sharedexp_mantissabits;
+
+// Emax is the maximum allowed biased exponent value (31)
+constexpr int g_sharedexp_maxexponent = 31;
+
+constexpr float g_sharedexp_max =
+ ((static_cast<float>(1 << g_sharedexp_mantissabits) - 1) /
+ static_cast<float>(1 << g_sharedexp_mantissabits)) *
+ static_cast<float>(1 << (g_sharedexp_maxexponent - g_sharedexp_bias));
+
+} // anonymous namespace
+
+unsigned int convertRGBFloatsTo999E5(float red, float green, float blue)
+{
+ const float red_c = std::max<float>(0, std::min(g_sharedexp_max, red));
+ const float green_c = std::max<float>(0, std::min(g_sharedexp_max, green));
+ const float blue_c = std::max<float>(0, std::min(g_sharedexp_max, blue));
+
+ const float max_c = std::max<float>(std::max<float>(red_c, green_c), blue_c);
+ const float exp_p =
+        std::max<float>(-g_sharedexp_bias - 1, floor(log2(max_c))) + 1 + g_sharedexp_bias;
+ const int max_s = static_cast<int>(
+ floor((max_c / (pow(2.0f, exp_p - g_sharedexp_biased_mantissabits))) + 0.5f));
+ const int exp_s =
+ static_cast<int>((max_s < pow(2.0f, g_sharedexp_mantissabits)) ? exp_p : exp_p + 1);
+ const float pow2_exp = pow(2.0f, static_cast<float>(exp_s) - g_sharedexp_biased_mantissabits);
+
+ RGB9E5Data output;
+ output.R = static_cast<unsigned int>(floor((red_c / pow2_exp) + 0.5f));
+ output.G = static_cast<unsigned int>(floor((green_c / pow2_exp) + 0.5f));
+ output.B = static_cast<unsigned int>(floor((blue_c / pow2_exp) + 0.5f));
+ output.E = exp_s;
+
+ return bitCast<unsigned int>(output);
+}
+
+void convert999E5toRGBFloats(unsigned int input, float *red, float *green, float *blue)
+{
+ const RGB9E5Data *inputData = reinterpret_cast<const RGB9E5Data *>(&input);
+
+ const float pow2_exp =
+ pow(2.0f, static_cast<float>(inputData->E) - g_sharedexp_biased_mantissabits);
+
+ *red = inputData->R * pow2_exp;
+ *green = inputData->G * pow2_exp;
+ *blue = inputData->B * pow2_exp;
+}
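+
+// Round-trip sketch (illustrative only, not part of the upstream sources): packing
+// and unpacking recovers the inputs up to the 9-bit shared-exponent precision.
+//
+//   float r, g, b;
+//   unsigned int packed = gl::convertRGBFloatsTo999E5(0.25f, 0.5f, 1.0f);
+//   gl::convert999E5toRGBFloats(packed, &r, &g, &b);  // r ~= 0.25, g ~= 0.5, b ~= 1.0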
+
+} // namespace gl
diff --git a/gfx/angle/checkout/src/common/mathutil.h b/gfx/angle/checkout/src/common/mathutil.h
new file mode 100644
index 0000000000..560929239f
--- /dev/null
+++ b/gfx/angle/checkout/src/common/mathutil.h
@@ -0,0 +1,1482 @@
+//
+// Copyright 2002 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// mathutil.h: Math and bit manipulation functions.
+
+#ifndef COMMON_MATHUTIL_H_
+#define COMMON_MATHUTIL_H_
+
+#include <math.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+#include <limits>
+
+#include <anglebase/numerics/safe_math.h>
+
+#include "common/debug.h"
+#include "common/platform.h"
+
+namespace angle
+{
+using base::CheckedNumeric;
+using base::IsValueInRangeForNumericType;
+} // namespace angle
+
+namespace gl
+{
+
+const unsigned int Float32One = 0x3F800000;
+const unsigned short Float16One = 0x3C00;
+
+template <typename T>
+inline constexpr bool isPow2(T x)
+{
+ static_assert(std::is_integral<T>::value, "isPow2 must be called on an integer type.");
+ return (x & (x - 1)) == 0 && (x != 0);
+}
+
+template <typename T>
+inline int log2(T x)
+{
+ static_assert(std::is_integral<T>::value, "log2 must be called on an integer type.");
+ int r = 0;
+ while ((x >> r) > 1)
+ r++;
+ return r;
+}
+
+inline unsigned int ceilPow2(unsigned int x)
+{
+ if (x != 0)
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x++;
+
+ return x;
+}
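+
+// Usage sketch (illustrative only, not part of the upstream sources):
+//
+//   gl::isPow2(64);      // true
+//   gl::log2(64);        // 6 (floor of the base-2 logarithm)
+//   gl::ceilPow2(100u);  // 128 (smallest power of two >= 100)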
+
+template <typename DestT, typename SrcT>
+inline DestT clampCast(SrcT value)
+{
+ // For floating-point types with denormalization, min returns the minimum positive normalized
+ // value. To find the value that has no values less than it, use numeric_limits::lowest.
+ constexpr const long double destLo =
+ static_cast<long double>(std::numeric_limits<DestT>::lowest());
+ constexpr const long double destHi =
+ static_cast<long double>(std::numeric_limits<DestT>::max());
+ constexpr const long double srcLo =
+ static_cast<long double>(std::numeric_limits<SrcT>::lowest());
+ constexpr long double srcHi = static_cast<long double>(std::numeric_limits<SrcT>::max());
+
+ if (destHi < srcHi)
+ {
+ DestT destMax = std::numeric_limits<DestT>::max();
+ if (value >= static_cast<SrcT>(destMax))
+ {
+ return destMax;
+ }
+ }
+
+ if (destLo > srcLo)
+ {
+ DestT destLow = std::numeric_limits<DestT>::lowest();
+ if (value <= static_cast<SrcT>(destLow))
+ {
+ return destLow;
+ }
+ }
+
+ return static_cast<DestT>(value);
+}
+
+// Specialize clampCast for bool->int conversion to avoid MSVS 2015 performance warning when the max
+// value is casted to the source type.
+template <>
+inline unsigned int clampCast(bool value)
+{
+ return static_cast<unsigned int>(value);
+}
+
+template <>
+inline int clampCast(bool value)
+{
+ return static_cast<int>(value);
+}
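+
+// Usage sketch (illustrative only, not part of the upstream sources): out-of-range
+// values saturate at the destination type's limits instead of wrapping.
+//
+//   gl::clampCast<uint8_t>(300);      // 255
+//   gl::clampCast<int8_t>(-1000.0f);  // -128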
+
+template <typename T, typename MIN, typename MAX>
+inline T clamp(T x, MIN min, MAX max)
+{
+ // Since NaNs fail all comparison tests, a NaN value will default to min
+ return x > min ? (x > max ? max : x) : min;
+}
+
+template <typename T>
+T clampForBitCount(T value, size_t bitCount)
+{
+ static_assert(std::numeric_limits<T>::is_integer, "T must be an integer.");
+
+ if (bitCount == 0)
+ {
+ constexpr T kZero = 0;
+ return kZero;
+ }
+ ASSERT(bitCount <= sizeof(T) * 8);
+
+ constexpr bool kIsSigned = std::numeric_limits<T>::is_signed;
+ ASSERT((bitCount > 1) || !kIsSigned);
+
+ T min = 0;
+ T max = 0;
+ if (bitCount == sizeof(T) * 8)
+ {
+ min = std::numeric_limits<T>::min();
+ max = std::numeric_limits<T>::max();
+ }
+ else
+ {
+ constexpr T kOne = 1;
+ min = (kIsSigned) ? -1 * (kOne << (bitCount - 1)) : 0;
+ max = (kIsSigned) ? (kOne << (bitCount - 1)) - 1 : (kOne << bitCount) - 1;
+ }
+
+ return gl::clamp(value, min, max);
+}
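+
+// Usage sketch (illustrative only, not part of the upstream sources): a signed
+// 5-bit field can represent [-16, 15], so values are clamped to that range.
+//
+//   gl::clampForBitCount<int>(100, 5);   // 15
+//   gl::clampForBitCount<int>(-100, 5);  // -16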
+
+inline float clamp01(float x)
+{
+ return clamp(x, 0.0f, 1.0f);
+}
+
+template <const int n>
+inline unsigned int unorm(float x)
+{
+ const unsigned int max = 0xFFFFFFFF >> (32 - n);
+
+ if (x > 1)
+ {
+ return max;
+ }
+ else if (x < 0)
+ {
+ return 0;
+ }
+ else
+ {
+ return (unsigned int)(max * x + 0.5f);
+ }
+}
+
+inline bool supportsSSE2()
+{
+#if defined(ANGLE_USE_SSE)
+ static bool checked = false;
+ static bool supports = false;
+
+ if (checked)
+ {
+ return supports;
+ }
+
+# if defined(ANGLE_PLATFORM_WINDOWS) && !defined(_M_ARM) && !defined(_M_ARM64)
+ {
+ int info[4];
+ __cpuid(info, 0);
+
+ if (info[0] >= 1)
+ {
+ __cpuid(info, 1);
+
+ supports = (info[3] >> 26) & 1;
+ }
+ }
+# endif // defined(ANGLE_PLATFORM_WINDOWS) && !defined(_M_ARM) && !defined(_M_ARM64)
+ checked = true;
+ return supports;
+#else // defined(ANGLE_USE_SSE)
+ return false;
+#endif
+}
+
+template <typename destType, typename sourceType>
+destType bitCast(const sourceType &source)
+{
+ size_t copySize = std::min(sizeof(destType), sizeof(sourceType));
+ destType output;
+ memcpy(&output, &source, copySize);
+ return output;
+}
+
+// https://stackoverflow.com/a/37581284
+template <typename T>
+static constexpr double normalize(T value)
+{
+ return value < 0 ? -static_cast<double>(value) / std::numeric_limits<T>::min()
+ : static_cast<double>(value) / std::numeric_limits<T>::max();
+}
+
+inline unsigned short float32ToFloat16(float fp32)
+{
+ unsigned int fp32i = bitCast<unsigned int>(fp32);
+ unsigned int sign = (fp32i & 0x80000000) >> 16;
+ unsigned int abs = fp32i & 0x7FFFFFFF;
+
+ if (abs > 0x7F800000)
+ { // NaN
+ return 0x7FFF;
+ }
+ else if (abs > 0x47FFEFFF)
+ { // Infinity
+ return static_cast<uint16_t>(sign | 0x7C00);
+ }
+ else if (abs < 0x38800000) // Denormal
+ {
+ unsigned int mantissa = (abs & 0x007FFFFF) | 0x00800000;
+ int e = 113 - (abs >> 23);
+
+ if (e < 24)
+ {
+ abs = mantissa >> e;
+ }
+ else
+ {
+ abs = 0;
+ }
+
+ return static_cast<unsigned short>(sign | (abs + 0x00000FFF + ((abs >> 13) & 1)) >> 13);
+ }
+ else
+ {
+ return static_cast<unsigned short>(
+ sign | (abs + 0xC8000000 + 0x00000FFF + ((abs >> 13) & 1)) >> 13);
+ }
+}
+
+float float16ToFloat32(unsigned short h);
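+
+// Sanity-check sketch (illustrative only, not part of the upstream sources):
+//
+//   unsigned short h = gl::float32ToFloat16(1.0f);  // 0x3C00 == Float16One
+//   float f = gl::float16ToFloat32(h);              // 1.0f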
+
+unsigned int convertRGBFloatsTo999E5(float red, float green, float blue);
+void convert999E5toRGBFloats(unsigned int input, float *red, float *green, float *blue);
+
+inline unsigned short float32ToFloat11(float fp32)
+{
+ const unsigned int float32MantissaMask = 0x7FFFFF;
+ const unsigned int float32ExponentMask = 0x7F800000;
+ const unsigned int float32SignMask = 0x80000000;
+ const unsigned int float32ValueMask = ~float32SignMask;
+ const unsigned int float32ExponentFirstBit = 23;
+ const unsigned int float32ExponentBias = 127;
+
+ const unsigned short float11Max = 0x7BF;
+ const unsigned short float11MantissaMask = 0x3F;
+ const unsigned short float11ExponentMask = 0x7C0;
+ const unsigned short float11BitMask = 0x7FF;
+ const unsigned int float11ExponentBias = 14;
+
+ const unsigned int float32Maxfloat11 = 0x477E0000;
+ const unsigned int float32MinNormfloat11 = 0x38800000;
+ const unsigned int float32MinDenormfloat11 = 0x35000080;
+
+ const unsigned int float32Bits = bitCast<unsigned int>(fp32);
+ const bool float32Sign = (float32Bits & float32SignMask) == float32SignMask;
+
+ unsigned int float32Val = float32Bits & float32ValueMask;
+
+ if ((float32Val & float32ExponentMask) == float32ExponentMask)
+ {
+ // INF or NAN
+ if ((float32Val & float32MantissaMask) != 0)
+ {
+ return float11ExponentMask |
+ (((float32Val >> 17) | (float32Val >> 11) | (float32Val >> 6) | (float32Val)) &
+ float11MantissaMask);
+ }
+ else if (float32Sign)
+ {
+ // -INF is clamped to 0 since float11 is positive only
+ return 0;
+ }
+ else
+ {
+ return float11ExponentMask;
+ }
+ }
+ else if (float32Sign)
+ {
+ // float11 is positive only, so clamp to zero
+ return 0;
+ }
+ else if (float32Val > float32Maxfloat11)
+ {
+ // The number is too large to be represented as a float11, set to max
+ return float11Max;
+ }
+ else if (float32Val < float32MinDenormfloat11)
+ {
+ // The number is too small to be represented as a denormalized float11, set to 0
+ return 0;
+ }
+ else
+ {
+ if (float32Val < float32MinNormfloat11)
+ {
+ // The number is too small to be represented as a normalized float11
+ // Convert it to a denormalized value.
+ const unsigned int shift = (float32ExponentBias - float11ExponentBias) -
+ (float32Val >> float32ExponentFirstBit);
+ ASSERT(shift < 32);
+ float32Val =
+ ((1 << float32ExponentFirstBit) | (float32Val & float32MantissaMask)) >> shift;
+ }
+ else
+ {
+ // Rebias the exponent to represent the value as a normalized float11
+ float32Val += 0xC8000000;
+ }
+
+ return ((float32Val + 0xFFFF + ((float32Val >> 17) & 1)) >> 17) & float11BitMask;
+ }
+}
+
+inline unsigned short float32ToFloat10(float fp32)
+{
+ const unsigned int float32MantissaMask = 0x7FFFFF;
+ const unsigned int float32ExponentMask = 0x7F800000;
+ const unsigned int float32SignMask = 0x80000000;
+ const unsigned int float32ValueMask = ~float32SignMask;
+ const unsigned int float32ExponentFirstBit = 23;
+ const unsigned int float32ExponentBias = 127;
+
+ const unsigned short float10Max = 0x3DF;
+ const unsigned short float10MantissaMask = 0x1F;
+ const unsigned short float10ExponentMask = 0x3E0;
+ const unsigned short float10BitMask = 0x3FF;
+ const unsigned int float10ExponentBias = 14;
+
+ const unsigned int float32Maxfloat10 = 0x477C0000;
+ const unsigned int float32MinNormfloat10 = 0x38800000;
+ const unsigned int float32MinDenormfloat10 = 0x35800040;
+
+ const unsigned int float32Bits = bitCast<unsigned int>(fp32);
+ const bool float32Sign = (float32Bits & float32SignMask) == float32SignMask;
+
+ unsigned int float32Val = float32Bits & float32ValueMask;
+
+ if ((float32Val & float32ExponentMask) == float32ExponentMask)
+ {
+ // INF or NAN
+ if ((float32Val & float32MantissaMask) != 0)
+ {
+ return float10ExponentMask |
+ (((float32Val >> 18) | (float32Val >> 13) | (float32Val >> 3) | (float32Val)) &
+ float10MantissaMask);
+ }
+ else if (float32Sign)
+ {
+ // -INF is clamped to 0 since float10 is positive only
+ return 0;
+ }
+ else
+ {
+ return float10ExponentMask;
+ }
+ }
+ else if (float32Sign)
+ {
+ // float10 is positive only, so clamp to zero
+ return 0;
+ }
+ else if (float32Val > float32Maxfloat10)
+ {
+ // The number is too large to be represented as a float10, set to max
+ return float10Max;
+ }
+ else if (float32Val < float32MinDenormfloat10)
+ {
+ // The number is too small to be represented as a denormalized float10, set to 0
+ return 0;
+ }
+ else
+ {
+ if (float32Val < float32MinNormfloat10)
+ {
+ // The number is too small to be represented as a normalized float10
+ // Convert it to a denormalized value.
+ const unsigned int shift = (float32ExponentBias - float10ExponentBias) -
+ (float32Val >> float32ExponentFirstBit);
+ ASSERT(shift < 32);
+ float32Val =
+ ((1 << float32ExponentFirstBit) | (float32Val & float32MantissaMask)) >> shift;
+ }
+ else
+ {
+ // Rebias the exponent to represent the value as a normalized float10
+ float32Val += 0xC8000000;
+ }
+
+ return ((float32Val + 0x1FFFF + ((float32Val >> 18) & 1)) >> 18) & float10BitMask;
+ }
+}
+
+inline float float11ToFloat32(unsigned short fp11)
+{
+ unsigned short exponent = (fp11 >> 6) & 0x1F;
+ unsigned short mantissa = fp11 & 0x3F;
+
+ if (exponent == 0x1F)
+ {
+ // INF or NAN
+ return bitCast<float>(0x7f800000 | (mantissa << 17));
+ }
+ else
+ {
+ if (exponent != 0)
+ {
+ // normalized
+ }
+ else if (mantissa != 0)
+ {
+ // The value is denormalized
+ exponent = 1;
+
+ do
+ {
+ exponent--;
+ mantissa <<= 1;
+ } while ((mantissa & 0x40) == 0);
+
+ mantissa = mantissa & 0x3F;
+ }
+ else // The value is zero
+ {
+ exponent = static_cast<unsigned short>(-112);
+ }
+
+ return bitCast<float>(((exponent + 112) << 23) | (mantissa << 17));
+ }
+}
+
+inline float float10ToFloat32(unsigned short fp10)
+{
+ unsigned short exponent = (fp10 >> 5) & 0x1F;
+ unsigned short mantissa = fp10 & 0x1F;
+
+ if (exponent == 0x1F)
+ {
+ // INF or NAN
+ return bitCast<float>(0x7f800000 | (mantissa << 17));
+ }
+ else
+ {
+ if (exponent != 0)
+ {
+ // normalized
+ }
+ else if (mantissa != 0)
+ {
+ // The value is denormalized
+ exponent = 1;
+
+ do
+ {
+ exponent--;
+ mantissa <<= 1;
+ } while ((mantissa & 0x20) == 0);
+
+ mantissa = mantissa & 0x1F;
+ }
+ else // The value is zero
+ {
+ exponent = static_cast<unsigned short>(-112);
+ }
+
+ return bitCast<float>(((exponent + 112) << 23) | (mantissa << 18));
+ }
+}
+
+// Converts to and from float and 16.16 fixed point format.
+inline float ConvertFixedToFloat(int32_t fixedInput)
+{
+ return static_cast<float>(fixedInput) / 65536.0f;
+}
+
+inline uint32_t ConvertFloatToFixed(float floatInput)
+{
+ static constexpr uint32_t kHighest = 32767 * 65536 + 65535;
+ static constexpr uint32_t kLowest = static_cast<uint32_t>(-32768 * 65536 + 65535);
+
+ if (floatInput > 32767.65535)
+ {
+ return kHighest;
+ }
+ else if (floatInput < -32768.65535)
+ {
+ return kLowest;
+ }
+ else
+ {
+ return static_cast<uint32_t>(floatInput * 65536);
+ }
+}
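+
+// Usage sketch (illustrative only, not part of the upstream sources): in 16.16
+// fixed point the integer part occupies the high 16 bits.
+//
+//   gl::ConvertFixedToFloat(0x00018000);  // 1.5f (1 + 0x8000 / 65536)
+//   gl::ConvertFloatToFixed(1.5f);        // 0x00018000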
+
+template <typename T>
+inline float normalizedToFloat(T input)
+{
+ static_assert(std::numeric_limits<T>::is_integer, "T must be an integer.");
+
+ if (sizeof(T) > 2)
+ {
+ // float has only a 23 bit mantissa, so we need to do the calculation in double precision
+ constexpr double inverseMax = 1.0 / std::numeric_limits<T>::max();
+ return static_cast<float>(input * inverseMax);
+ }
+ else
+ {
+ constexpr float inverseMax = 1.0f / std::numeric_limits<T>::max();
+ return input * inverseMax;
+ }
+}
+
+template <unsigned int inputBitCount, typename T>
+inline float normalizedToFloat(T input)
+{
+ static_assert(std::numeric_limits<T>::is_integer, "T must be an integer.");
+ static_assert(inputBitCount < (sizeof(T) * 8), "T must have more bits than inputBitCount.");
+ ASSERT((input & ~((1 << inputBitCount) - 1)) == 0);
+
+ if (inputBitCount > 23)
+ {
+ // float has only a 23 bit mantissa, so we need to do the calculation in double precision
+ constexpr double inverseMax = 1.0 / ((1 << inputBitCount) - 1);
+ return static_cast<float>(input * inverseMax);
+ }
+ else
+ {
+ constexpr float inverseMax = 1.0f / ((1 << inputBitCount) - 1);
+ return input * inverseMax;
+ }
+}
+
+template <typename T>
+inline T floatToNormalized(float input)
+{
+ if constexpr (sizeof(T) > 2)
+ {
+ // float has only a 23 bit mantissa, so we need to do the calculation in double precision
+ return static_cast<T>(std::numeric_limits<T>::max() * static_cast<double>(input) + 0.5);
+ }
+ else
+ {
+ return static_cast<T>(std::numeric_limits<T>::max() * input + 0.5f);
+ }
+}
+
+template <unsigned int outputBitCount, typename T>
+inline T floatToNormalized(float input)
+{
+ static_assert(outputBitCount < (sizeof(T) * 8), "T must have more bits than outputBitCount.");
+
+ if (outputBitCount > 23)
+ {
+ // float has only a 23 bit mantissa, so we need to do the calculation in double precision
+ return static_cast<T>(((1 << outputBitCount) - 1) * static_cast<double>(input) + 0.5);
+ }
+ else
+ {
+ return static_cast<T>(((1 << outputBitCount) - 1) * input + 0.5f);
+ }
+}
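+
+// Usage sketch (illustrative only, not part of the upstream sources): round-tripping
+// an 8-bit unorm value through float.
+//
+//   gl::normalizedToFloat<uint8_t>(255);   // 1.0f
+//   gl::floatToNormalized<uint8_t>(1.0f);  // 255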
+
+template <unsigned int inputBitCount, unsigned int inputBitStart, typename T>
+inline T getShiftedData(T input)
+{
+ static_assert(inputBitCount + inputBitStart <= (sizeof(T) * 8),
+ "T must have at least as many bits as inputBitCount + inputBitStart.");
+ const T mask = (1 << inputBitCount) - 1;
+ return (input >> inputBitStart) & mask;
+}
+
+template <unsigned int inputBitCount, unsigned int inputBitStart, typename T>
+inline T shiftData(T input)
+{
+ static_assert(inputBitCount + inputBitStart <= (sizeof(T) * 8),
+ "T must have at least as many bits as inputBitCount + inputBitStart.");
+ const T mask = (1 << inputBitCount) - 1;
+ return (input & mask) << inputBitStart;
+}
+
+inline unsigned int CountLeadingZeros(uint32_t x)
+{
+ // Use binary search to find the amount of leading zeros.
+ unsigned int zeros = 32u;
+ uint32_t y;
+
+ y = x >> 16u;
+ if (y != 0)
+ {
+ zeros = zeros - 16u;
+ x = y;
+ }
+ y = x >> 8u;
+ if (y != 0)
+ {
+ zeros = zeros - 8u;
+ x = y;
+ }
+ y = x >> 4u;
+ if (y != 0)
+ {
+ zeros = zeros - 4u;
+ x = y;
+ }
+ y = x >> 2u;
+ if (y != 0)
+ {
+ zeros = zeros - 2u;
+ x = y;
+ }
+ y = x >> 1u;
+ if (y != 0)
+ {
+ return zeros - 2u;
+ }
+ return zeros - x;
+}
+
+inline unsigned char average(unsigned char a, unsigned char b)
+{
+ return ((a ^ b) >> 1) + (a & b);
+}
+
+inline signed char average(signed char a, signed char b)
+{
+ return ((short)a + (short)b) / 2;
+}
+
+inline unsigned short average(unsigned short a, unsigned short b)
+{
+ return ((a ^ b) >> 1) + (a & b);
+}
+
+inline signed short average(signed short a, signed short b)
+{
+ return ((int)a + (int)b) / 2;
+}
+
+inline unsigned int average(unsigned int a, unsigned int b)
+{
+ return ((a ^ b) >> 1) + (a & b);
+}
+
+inline int average(int a, int b)
+{
+ long long average = (static_cast<long long>(a) + static_cast<long long>(b)) / 2LL;
+ return static_cast<int>(average);
+}
+
+inline float average(float a, float b)
+{
+ return (a + b) * 0.5f;
+}
+
+inline unsigned short averageHalfFloat(unsigned short a, unsigned short b)
+{
+ return float32ToFloat16((float16ToFloat32(a) + float16ToFloat32(b)) * 0.5f);
+}
+
+inline unsigned int averageFloat11(unsigned int a, unsigned int b)
+{
+ return float32ToFloat11((float11ToFloat32(static_cast<unsigned short>(a)) +
+ float11ToFloat32(static_cast<unsigned short>(b))) *
+ 0.5f);
+}
+
+inline unsigned int averageFloat10(unsigned int a, unsigned int b)
+{
+ return float32ToFloat10((float10ToFloat32(static_cast<unsigned short>(a)) +
+ float10ToFloat32(static_cast<unsigned short>(b))) *
+ 0.5f);
+}
+
+template <typename T>
+class Range
+{
+ public:
+ Range() {}
+ Range(T lo, T hi) : mLow(lo), mHigh(hi) {}
+
+ T length() const { return (empty() ? 0 : (mHigh - mLow)); }
+
+ bool intersects(Range<T> other)
+ {
+ if (mLow <= other.mLow)
+ {
+ return other.mLow < mHigh;
+ }
+ else
+ {
+ return mLow < other.mHigh;
+ }
+ }
+
+    // Assumes that end is non-inclusive. For example, extending to 5 will make "end" 6.
+ void extend(T value)
+ {
+ mLow = value < mLow ? value : mLow;
+ mHigh = value >= mHigh ? (value + 1) : mHigh;
+ }
+
+ bool empty() const { return mHigh <= mLow; }
+
+ bool contains(T value) const { return value >= mLow && value < mHigh; }
+
+ class Iterator final
+ {
+ public:
+ Iterator(T value) : mCurrent(value) {}
+
+ Iterator &operator++()
+ {
+ mCurrent++;
+ return *this;
+ }
+ bool operator==(const Iterator &other) const { return mCurrent == other.mCurrent; }
+ bool operator!=(const Iterator &other) const { return mCurrent != other.mCurrent; }
+ T operator*() const { return mCurrent; }
+
+ private:
+ T mCurrent;
+ };
+
+ Iterator begin() const { return Iterator(mLow); }
+
+ Iterator end() const { return Iterator(mHigh); }
+
+ T low() const { return mLow; }
+ T high() const { return mHigh; }
+
+ void invalidate()
+ {
+ mLow = std::numeric_limits<T>::max();
+ mHigh = std::numeric_limits<T>::min();
+ }
+
+ private:
+ T mLow;
+ T mHigh;
+};
+
+typedef Range<int> RangeI;
+typedef Range<unsigned int> RangeUI;
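+
+// Usage sketch (illustrative only, not part of the upstream sources): the high end
+// of a Range is exclusive, and extend() grows the range to keep a value inside it.
+//
+//   gl::RangeUI r(2u, 5u);  // covers 2, 3, 4
+//   r.extend(7u);           // now covers [2, 8)
+//   r.contains(7u);         // true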
+
+struct IndexRange
+{
+ struct Undefined
+ {};
+ IndexRange(Undefined) {}
+ IndexRange() : IndexRange(0, 0, 0) {}
+ IndexRange(size_t start_, size_t end_, size_t vertexIndexCount_)
+ : start(start_), end(end_), vertexIndexCount(vertexIndexCount_)
+ {
+ ASSERT(start <= end);
+ }
+
+ // Number of vertices in the range.
+ size_t vertexCount() const { return (end - start) + 1; }
+
+ // Inclusive range of indices that are not primitive restart
+ size_t start;
+ size_t end;
+
+ // Number of non-primitive restart indices
+ size_t vertexIndexCount;
+};
+
+// Combine a floating-point value representing a mantissa (x) and an integer exponent (exp) into a
+// floating-point value. As in GLSL ldexp() built-in.
+inline float Ldexp(float x, int exp)
+{
+ if (exp > 128)
+ {
+ return std::numeric_limits<float>::infinity();
+ }
+ if (exp < -126)
+ {
+ return 0.0f;
+ }
+ double result = static_cast<double>(x) * std::pow(2.0, static_cast<double>(exp));
+ return static_cast<float>(result);
+}
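+
+// Usage sketch (illustrative only, not part of the upstream sources):
+//
+//   gl::Ldexp(0.5f, 3);    // 4.0f (0.5 * 2^3)
+//   gl::Ldexp(1.0f, 200);  // +infinity, since exponents above 128 are clamped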
+
+// First, both normalized floating-point values are converted into 16-bit integer values.
+// Then, the results are packed into the returned 32-bit unsigned integer.
+// The first float value will be written to the least significant bits of the output;
+// the last float value will be written to the most significant bits.
+// The conversion of each value to fixed point is done as follows:
+// packSnorm2x16 : round(clamp(c, -1, +1) * 32767.0)
+inline uint32_t packSnorm2x16(float f1, float f2)
+{
+ int16_t leastSignificantBits = static_cast<int16_t>(roundf(clamp(f1, -1.0f, 1.0f) * 32767.0f));
+ int16_t mostSignificantBits = static_cast<int16_t>(roundf(clamp(f2, -1.0f, 1.0f) * 32767.0f));
+ return static_cast<uint32_t>(mostSignificantBits) << 16 |
+ (static_cast<uint32_t>(leastSignificantBits) & 0xFFFF);
+}
+
+// First, unpacks a single 32-bit unsigned integer u into a pair of 16-bit unsigned integers. Then,
+// each component is converted to a normalized floating-point value to generate the returned two
+// float values. The first float value will be extracted from the least significant bits of the
+// input; the last float value will be extracted from the most-significant bits. The conversion for
+// unpacked fixed-point value to floating point is done as follows: unpackSnorm2x16 : clamp(f /
+// 32767.0, -1, +1)
+inline void unpackSnorm2x16(uint32_t u, float *f1, float *f2)
+{
+ int16_t leastSignificantBits = static_cast<int16_t>(u & 0xFFFF);
+ int16_t mostSignificantBits = static_cast<int16_t>(u >> 16);
+ *f1 = clamp(static_cast<float>(leastSignificantBits) / 32767.0f, -1.0f, 1.0f);
+ *f2 = clamp(static_cast<float>(mostSignificantBits) / 32767.0f, -1.0f, 1.0f);
+}
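+
+// Round-trip sketch (illustrative only, not part of the upstream sources):
+//
+//   uint32_t p = gl::packSnorm2x16(-1.0f, 1.0f);  // 0x7FFF8001
+//   float a, b;
+//   gl::unpackSnorm2x16(p, &a, &b);               // a == -1.0f, b == 1.0f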
+
+// First, both normalized floating-point values are converted into 16-bit integer values.
+// Then, the results are packed into the returned 32-bit unsigned integer.
+// The first float value will be written to the least significant bits of the output;
+// the last float value will be written to the most significant bits.
+// The conversion of each value to fixed point is done as follows:
+// packUnorm2x16 : round(clamp(c, 0, +1) * 65535.0)
+inline uint32_t packUnorm2x16(float f1, float f2)
+{
+ uint16_t leastSignificantBits = static_cast<uint16_t>(roundf(clamp(f1, 0.0f, 1.0f) * 65535.0f));
+ uint16_t mostSignificantBits = static_cast<uint16_t>(roundf(clamp(f2, 0.0f, 1.0f) * 65535.0f));
+ return static_cast<uint32_t>(mostSignificantBits) << 16 |
+ static_cast<uint32_t>(leastSignificantBits);
+}
+
+// First, unpacks a single 32-bit unsigned integer u into a pair of 16-bit unsigned integers. Then,
+// each component is converted to a normalized floating-point value to generate the returned two
+// float values. The first float value will be extracted from the least significant bits of the
+// input; the last float value will be extracted from the most-significant bits. The conversion for
+// unpacked fixed-point value to floating point is done as follows: unpackUnorm2x16 : f / 65535.0
+inline void unpackUnorm2x16(uint32_t u, float *f1, float *f2)
+{
+ uint16_t leastSignificantBits = static_cast<uint16_t>(u & 0xFFFF);
+ uint16_t mostSignificantBits = static_cast<uint16_t>(u >> 16);
+ *f1 = static_cast<float>(leastSignificantBits) / 65535.0f;
+ *f2 = static_cast<float>(mostSignificantBits) / 65535.0f;
+}
+
+// Helper functions intended to be used only here.
+namespace priv
+{
+
+inline uint8_t ToPackedUnorm8(float f)
+{
+ return static_cast<uint8_t>(roundf(clamp(f, 0.0f, 1.0f) * 255.0f));
+}
+
+inline int8_t ToPackedSnorm8(float f)
+{
+ return static_cast<int8_t>(roundf(clamp(f, -1.0f, 1.0f) * 127.0f));
+}
+
+} // namespace priv
+
+// Packs 4 normalized unsigned floating-point values to a single 32-bit unsigned integer. Works
+// similarly to packUnorm2x16. The floats are clamped to the range 0.0 to 1.0, and written to the
+// unsigned integer starting from the least significant bits.
+inline uint32_t PackUnorm4x8(float f1, float f2, float f3, float f4)
+{
+ uint8_t bits[4];
+ bits[0] = priv::ToPackedUnorm8(f1);
+ bits[1] = priv::ToPackedUnorm8(f2);
+ bits[2] = priv::ToPackedUnorm8(f3);
+ bits[3] = priv::ToPackedUnorm8(f4);
+ uint32_t result = 0u;
+ for (int i = 0; i < 4; ++i)
+ {
+ int shift = i * 8;
+ result |= (static_cast<uint32_t>(bits[i]) << shift);
+ }
+ return result;
+}
+
+// Unpacks 4 normalized unsigned floating-point values from a single 32-bit unsigned integer into f.
+// Works similarly to unpackUnorm2x16. The floats are unpacked starting from the least significant
+// bits.
+inline void UnpackUnorm4x8(uint32_t u, float *f)
+{
+ for (int i = 0; i < 4; ++i)
+ {
+ int shift = i * 8;
+ uint8_t bits = static_cast<uint8_t>((u >> shift) & 0xFF);
+ f[i] = static_cast<float>(bits) / 255.0f;
+ }
+}
+
+// Packs 4 normalized signed floating-point values to a single 32-bit unsigned integer. The floats
+// are clamped to the range -1.0 to 1.0, and written to the unsigned integer starting from the least
+// significant bits.
+inline uint32_t PackSnorm4x8(float f1, float f2, float f3, float f4)
+{
+ int8_t bits[4];
+ bits[0] = priv::ToPackedSnorm8(f1);
+ bits[1] = priv::ToPackedSnorm8(f2);
+ bits[2] = priv::ToPackedSnorm8(f3);
+ bits[3] = priv::ToPackedSnorm8(f4);
+ uint32_t result = 0u;
+ for (int i = 0; i < 4; ++i)
+ {
+ int shift = i * 8;
+ result |= ((static_cast<uint32_t>(bits[i]) & 0xFF) << shift);
+ }
+ return result;
+}
+
+// Unpacks 4 normalized signed floating-point values from a single 32-bit unsigned integer into f.
+// Works similarly to unpackSnorm2x16. The floats are unpacked starting from the least significant
+// bits, and clamped to the range -1.0 to 1.0.
+inline void UnpackSnorm4x8(uint32_t u, float *f)
+{
+ for (int i = 0; i < 4; ++i)
+ {
+ int shift = i * 8;
+ int8_t bits = static_cast<int8_t>((u >> shift) & 0xFF);
+ f[i] = clamp(static_cast<float>(bits) / 127.0f, -1.0f, 1.0f);
+ }
+}
+
+// Returns an unsigned integer obtained by converting the two floating-point values to the 16-bit
+// floating-point representation found in the OpenGL ES Specification, and then packing these
+// two 16-bit integers into a 32-bit unsigned integer.
+// f1: The 16 least-significant bits of the result;
+// f2: The 16 most-significant bits.
+inline uint32_t packHalf2x16(float f1, float f2)
+{
+ uint16_t leastSignificantBits = static_cast<uint16_t>(float32ToFloat16(f1));
+ uint16_t mostSignificantBits = static_cast<uint16_t>(float32ToFloat16(f2));
+ return static_cast<uint32_t>(mostSignificantBits) << 16 |
+ static_cast<uint32_t>(leastSignificantBits);
+}
+
+// Returns two floating-point values obtained by unpacking a 32-bit unsigned integer into a pair of
+// 16-bit values, interpreting those values as 16-bit floating-point numbers according to the OpenGL
+// ES Specification, and converting them to 32-bit floating-point values. The first float value is
+// obtained from the 16 least-significant bits of u; the second component is obtained from the 16
+// most-significant bits of u.
+inline void unpackHalf2x16(uint32_t u, float *f1, float *f2)
+{
+ uint16_t leastSignificantBits = static_cast<uint16_t>(u & 0xFFFF);
+ uint16_t mostSignificantBits = static_cast<uint16_t>(u >> 16);
+
+ *f1 = float16ToFloat32(leastSignificantBits);
+ *f2 = float16ToFloat32(mostSignificantBits);
+}
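+
+// Round-trip sketch (illustrative only, not part of the upstream sources):
+//
+//   uint32_t p = gl::packHalf2x16(1.0f, 0.5f);  // 0x38003C00
+//   float a, b;
+//   gl::unpackHalf2x16(p, &a, &b);              // a == 1.0f, b == 0.5f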
+
+inline uint8_t sRGBToLinear(uint8_t srgbValue)
+{
+ float value = srgbValue / 255.0f;
+ if (value <= 0.04045f)
+ {
+ value = value / 12.92f;
+ }
+ else
+ {
+ value = std::pow((value + 0.055f) / 1.055f, 2.4f);
+ }
+ return static_cast<uint8_t>(clamp(value * 255.0f + 0.5f, 0.0f, 255.0f));
+}
+
+inline uint8_t linearToSRGB(uint8_t linearValue)
+{
+ float value = linearValue / 255.0f;
+ if (value <= 0.0f)
+ {
+ value = 0.0f;
+ }
+ else if (value < 0.0031308f)
+ {
+ value = value * 12.92f;
+ }
+ else if (value < 1.0f)
+ {
+ value = std::pow(value, 0.41666f) * 1.055f - 0.055f;
+ }
+ else
+ {
+ value = 1.0f;
+ }
+ return static_cast<uint8_t>(clamp(value * 255.0f + 0.5f, 0.0f, 255.0f));
+}
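+
+// Round-trip sketch (illustrative only, not part of the upstream sources); the exact
+// results depend on rounding, but mid-gray behaves roughly as shown:
+//
+//   uint8_t linear = gl::sRGBToLinear(128);     // ~55 (sRGB 0.5 is ~0.215 linear)
+//   uint8_t srgb   = gl::linearToSRGB(linear);  // ~128 again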
+
+// Reverse the order of the bits.
+inline uint32_t BitfieldReverse(uint32_t value)
+{
+ // TODO(oetuaho@nvidia.com): Optimize this if needed. There don't seem to be compiler intrinsics
+ // for this, and right now it's not used in performance-critical paths.
+ uint32_t result = 0u;
+ for (size_t j = 0u; j < 32u; ++j)
+ {
+ result |= (((value >> j) & 1u) << (31u - j));
+ }
+ return result;
+}
+
+// Count the 1 bits.
+#if defined(_MSC_VER) && !defined(__clang__)
+# if defined(_M_IX86) || defined(_M_X64)
+namespace priv
+{
+// Check POPCNT instruction support and cache the result.
+// https://docs.microsoft.com/en-us/cpp/intrinsics/popcnt16-popcnt-popcnt64#remarks
+static const bool kHasPopcnt = [] {
+ int info[4];
+ __cpuid(&info[0], 1);
+ return static_cast<bool>(info[2] & 0x800000);
+}();
+} // namespace priv
+
+// Polyfills for x86/x64 CPUs without POPCNT.
+// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+inline int BitCountPolyfill(uint32_t bits)
+{
+ bits = bits - ((bits >> 1) & 0x55555555);
+ bits = (bits & 0x33333333) + ((bits >> 2) & 0x33333333);
+ bits = ((bits + (bits >> 4) & 0x0F0F0F0F) * 0x01010101) >> 24;
+ return static_cast<int>(bits);
+}
+
+inline int BitCountPolyfill(uint64_t bits)
+{
+ bits = bits - ((bits >> 1) & 0x5555555555555555ull);
+ bits = (bits & 0x3333333333333333ull) + ((bits >> 2) & 0x3333333333333333ull);
+ bits = ((bits + (bits >> 4) & 0x0F0F0F0F0F0F0F0Full) * 0x0101010101010101ull) >> 56;
+ return static_cast<int>(bits);
+}
+
+inline int BitCount(uint32_t bits)
+{
+ if (priv::kHasPopcnt)
+ {
+ return static_cast<int>(__popcnt(bits));
+ }
+ return BitCountPolyfill(bits);
+}
+
+inline int BitCount(uint64_t bits)
+{
+ if (priv::kHasPopcnt)
+ {
+# if defined(_M_X64)
+ return static_cast<int>(__popcnt64(bits));
+# else // x86
+ return static_cast<int>(__popcnt(static_cast<uint32_t>(bits >> 32)) +
+ __popcnt(static_cast<uint32_t>(bits)));
+# endif // defined(_M_X64)
+ }
+ return BitCountPolyfill(bits);
+}
+
+# elif defined(_M_ARM) || defined(_M_ARM64)
+
+// MSVC's _CountOneBits* intrinsics are not defined for ARM64; moreover, they do not use dedicated
+// NEON instructions.
+
+inline int BitCount(uint32_t bits)
+{
+ // cast bits to 8x8 datatype and use VCNT on it
+ const uint8x8_t vsum = vcnt_u8(vcreate_u8(static_cast<uint64_t>(bits)));
+
+ // pairwise sums: 8x8 -> 16x4 -> 32x2
+ return static_cast<int>(vget_lane_u32(vpaddl_u16(vpaddl_u8(vsum)), 0));
+}
+
+inline int BitCount(uint64_t bits)
+{
+ // cast bits to 8x8 datatype and use VCNT on it
+ const uint8x8_t vsum = vcnt_u8(vcreate_u8(bits));
+
+ // pairwise sums: 8x8 -> 16x4 -> 32x2 -> 64x1
+ return static_cast<int>(vget_lane_u64(vpaddl_u32(vpaddl_u16(vpaddl_u8(vsum))), 0));
+}
+# endif // defined(_M_IX86) || defined(_M_X64)
+#endif // defined(_MSC_VER) && !defined(__clang__)
+
+#if defined(ANGLE_PLATFORM_POSIX) || defined(__clang__)
+inline int BitCount(uint32_t bits)
+{
+ return __builtin_popcount(bits);
+}
+
+inline int BitCount(uint64_t bits)
+{
+ return __builtin_popcountll(bits);
+}
+#endif // defined(ANGLE_PLATFORM_POSIX) || defined(__clang__)
+
+inline int BitCount(uint8_t bits)
+{
+ return BitCount(static_cast<uint32_t>(bits));
+}
+
+inline int BitCount(uint16_t bits)
+{
+ return BitCount(static_cast<uint32_t>(bits));
+}
+
+#if defined(ANGLE_PLATFORM_WINDOWS)
+// Return the index of the least significant bit set. Indexing is such that bit 0 is the least
+// significant bit. Implemented for different bit widths on different platforms.
+inline unsigned long ScanForward(uint32_t bits)
+{
+ ASSERT(bits != 0u);
+ unsigned long firstBitIndex = 0ul;
+ unsigned char ret = _BitScanForward(&firstBitIndex, bits);
+ ASSERT(ret != 0u);
+ return firstBitIndex;
+}
+
+inline unsigned long ScanForward(uint64_t bits)
+{
+ ASSERT(bits != 0u);
+ unsigned long firstBitIndex = 0ul;
+# if defined(ANGLE_IS_64_BIT_CPU)
+ unsigned char ret = _BitScanForward64(&firstBitIndex, bits);
+# else
+ unsigned char ret;
+ if (static_cast<uint32_t>(bits) == 0)
+ {
+ ret = _BitScanForward(&firstBitIndex, static_cast<uint32_t>(bits >> 32));
+ firstBitIndex += 32ul;
+ }
+ else
+ {
+ ret = _BitScanForward(&firstBitIndex, static_cast<uint32_t>(bits));
+ }
+# endif // defined(ANGLE_IS_64_BIT_CPU)
+ ASSERT(ret != 0u);
+ return firstBitIndex;
+}
+
+// Return the index of the most significant bit set. Indexing is such that bit 0 is the least
+// significant bit.
+inline unsigned long ScanReverse(uint32_t bits)
+{
+ ASSERT(bits != 0u);
+ unsigned long lastBitIndex = 0ul;
+ unsigned char ret = _BitScanReverse(&lastBitIndex, bits);
+ ASSERT(ret != 0u);
+ return lastBitIndex;
+}
+
+inline unsigned long ScanReverse(uint64_t bits)
+{
+ ASSERT(bits != 0u);
+ unsigned long lastBitIndex = 0ul;
+# if defined(ANGLE_IS_64_BIT_CPU)
+ unsigned char ret = _BitScanReverse64(&lastBitIndex, bits);
+# else
+ unsigned char ret;
+ if (static_cast<uint32_t>(bits >> 32) == 0)
+ {
+ ret = _BitScanReverse(&lastBitIndex, static_cast<uint32_t>(bits));
+ }
+ else
+ {
+ ret = _BitScanReverse(&lastBitIndex, static_cast<uint32_t>(bits >> 32));
+ lastBitIndex += 32ul;
+ }
+# endif // defined(ANGLE_IS_64_BIT_CPU)
+ ASSERT(ret != 0u);
+ return lastBitIndex;
+}
+#endif // defined(ANGLE_PLATFORM_WINDOWS)
+
+#if defined(ANGLE_PLATFORM_POSIX)
+inline unsigned long ScanForward(uint32_t bits)
+{
+ ASSERT(bits != 0u);
+ return static_cast<unsigned long>(__builtin_ctz(bits));
+}
+
+inline unsigned long ScanForward(uint64_t bits)
+{
+ ASSERT(bits != 0u);
+# if defined(ANGLE_IS_64_BIT_CPU)
+ return static_cast<unsigned long>(__builtin_ctzll(bits));
+# else
+ return static_cast<unsigned long>(static_cast<uint32_t>(bits) == 0
+ ? __builtin_ctz(static_cast<uint32_t>(bits >> 32)) + 32
+ : __builtin_ctz(static_cast<uint32_t>(bits)));
+# endif // defined(ANGLE_IS_64_BIT_CPU)
+}
+
+inline unsigned long ScanReverse(uint32_t bits)
+{
+ ASSERT(bits != 0u);
+ return static_cast<unsigned long>(sizeof(uint32_t) * CHAR_BIT - 1 - __builtin_clz(bits));
+}
+
+inline unsigned long ScanReverse(uint64_t bits)
+{
+ ASSERT(bits != 0u);
+# if defined(ANGLE_IS_64_BIT_CPU)
+ return static_cast<unsigned long>(sizeof(uint64_t) * CHAR_BIT - 1 - __builtin_clzll(bits));
+# else
+ if (static_cast<uint32_t>(bits >> 32) == 0)
+ {
+ return sizeof(uint32_t) * CHAR_BIT - 1 - __builtin_clz(static_cast<uint32_t>(bits));
+ }
+ else
+ {
+ return sizeof(uint32_t) * CHAR_BIT - 1 - __builtin_clz(static_cast<uint32_t>(bits >> 32)) +
+ 32;
+ }
+# endif // defined(ANGLE_IS_64_BIT_CPU)
+}
+#endif // defined(ANGLE_PLATFORM_POSIX)
+
+inline unsigned long ScanForward(uint8_t bits)
+{
+ return ScanForward(static_cast<uint32_t>(bits));
+}
+
+inline unsigned long ScanForward(uint16_t bits)
+{
+ return ScanForward(static_cast<uint32_t>(bits));
+}
+
+inline unsigned long ScanReverse(uint8_t bits)
+{
+ return ScanReverse(static_cast<uint32_t>(bits));
+}
+
+inline unsigned long ScanReverse(uint16_t bits)
+{
+ return ScanReverse(static_cast<uint32_t>(bits));
+}
+
+// Returns -1 on 0, otherwise the index of the least significant 1 bit as in GLSL.
+template <typename T>
+int FindLSB(T bits)
+{
+ static_assert(std::is_integral<T>::value, "must be integral type.");
+ if (bits == 0u)
+ {
+ return -1;
+ }
+ else
+ {
+ return static_cast<int>(ScanForward(bits));
+ }
+}
+
+// Returns -1 on 0, otherwise the index of the most significant 1 bit as in GLSL.
+template <typename T>
+int FindMSB(T bits)
+{
+ static_assert(std::is_integral<T>::value, "must be integral type.");
+ if (bits == 0u)
+ {
+ return -1;
+ }
+ else
+ {
+ return static_cast<int>(ScanReverse(bits));
+ }
+}
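+
+// Usage sketch (illustrative only, not part of the upstream sources):
+//
+//   gl::FindLSB(0x18u);  // 3 (lowest set bit of 0b11000)
+//   gl::FindMSB(0x18u);  // 4 (highest set bit)
+//   gl::FindLSB(0u);     // -1, matching GLSL findLSB()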
+
+// Returns whether the argument is Not a Number.
+// IEEE 754 single precision NaN representation: Exponent(8 bits) - 255, Mantissa(23 bits) -
+// non-zero.
+inline bool isNaN(float f)
+{
+ // Exponent mask: ((1u << 8) - 1u) << 23 = 0x7f800000u
+ // Mantissa mask: ((1u << 23) - 1u) = 0x7fffffu
+ return ((bitCast<uint32_t>(f) & 0x7f800000u) == 0x7f800000u) &&
+ (bitCast<uint32_t>(f) & 0x7fffffu);
+}
+
+// Returns whether the argument is infinity.
+// IEEE 754 single precision infinity representation: Exponent(8 bits) - 255, Mantissa(23 bits) -
+// zero.
+inline bool isInf(float f)
+{
+ // Exponent mask: ((1u << 8) - 1u) << 23 = 0x7f800000u
+ // Mantissa mask: ((1u << 23) - 1u) = 0x7fffffu
+ return ((bitCast<uint32_t>(f) & 0x7f800000u) == 0x7f800000u) &&
+ !(bitCast<uint32_t>(f) & 0x7fffffu);
+}
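+
+// Usage sketch (illustrative only, not part of the upstream sources):
+//
+//   gl::isNaN(std::numeric_limits<float>::quiet_NaN());  // true
+//   gl::isInf(std::numeric_limits<float>::infinity());   // true
+//   gl::isInf(1.0f);                                      // false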
+
+namespace priv
+{
+template <unsigned int N, unsigned int R>
+struct iSquareRoot
+{
+ static constexpr unsigned int solve()
+ {
+ return (R * R > N)
+ ? 0
+ : ((R * R == N) ? R : static_cast<unsigned int>(iSquareRoot<N, R + 1>::value));
+ }
+ enum Result
+ {
+ value = iSquareRoot::solve()
+ };
+};
+
+template <unsigned int N>
+struct iSquareRoot<N, N>
+{
+ enum result
+ {
+ value = N
+ };
+};
+
+} // namespace priv
+
+template <unsigned int N>
+constexpr unsigned int iSquareRoot()
+{
+ return priv::iSquareRoot<N, 1>::value;
+}
+
+// Sum, difference and multiplication operations for signed ints that wrap on 32-bit overflow.
+//
+// Unsigned types are defined to do arithmetic modulo 2^n in C++. For signed types, overflow
+// behavior is undefined.
+
+template <typename T>
+inline T WrappingSum(T lhs, T rhs)
+{
+ uint32_t lhsUnsigned = static_cast<uint32_t>(lhs);
+ uint32_t rhsUnsigned = static_cast<uint32_t>(rhs);
+ return static_cast<T>(lhsUnsigned + rhsUnsigned);
+}
+
+template <typename T>
+inline T WrappingDiff(T lhs, T rhs)
+{
+ uint32_t lhsUnsigned = static_cast<uint32_t>(lhs);
+ uint32_t rhsUnsigned = static_cast<uint32_t>(rhs);
+ return static_cast<T>(lhsUnsigned - rhsUnsigned);
+}
+
+inline int32_t WrappingMul(int32_t lhs, int32_t rhs)
+{
+ int64_t lhsWide = static_cast<int64_t>(lhs);
+ int64_t rhsWide = static_cast<int64_t>(rhs);
+ // The multiplication is guaranteed not to overflow.
+ int64_t resultWide = lhsWide * rhsWide;
+ // Implement the desired wrapping behavior by masking out the high-order 32 bits.
+ resultWide = resultWide & 0xffffffffLL;
+ // Casting to a narrower signed type is fine since the casted value is representable in the
+ // narrower type.
+ return static_cast<int32_t>(resultWide);
+}
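+
+// Usage sketch (illustrative only, not part of the upstream sources): arithmetic
+// wraps modulo 2^32 instead of triggering signed overflow.
+//
+//   gl::WrappingSum<int32_t>(INT32_MAX, 1);  // INT32_MIN
+//   gl::WrappingMul(0x10000, 0x10000);       // 0 (high-order bits discarded)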
+
+inline float scaleScreenDimensionToNdc(float dimensionScreen, float viewportDimension)
+{
+ return 2.0f * dimensionScreen / viewportDimension;
+}
+
+inline float scaleScreenCoordinateToNdc(float coordinateScreen, float viewportDimension)
+{
+ float halfShifted = coordinateScreen / viewportDimension;
+ return 2.0f * (halfShifted - 0.5f);
+}
+
+} // namespace gl
+
+namespace rx
+{
+
+template <typename T>
+T roundUp(const T value, const T alignment)
+{
+ auto temp = value + alignment - static_cast<T>(1);
+ return temp - temp % alignment;
+}
+
+template <typename T>
+constexpr T roundUpPow2(const T value, const T alignment)
+{
+ ASSERT(gl::isPow2(alignment));
+ return (value + alignment - 1) & ~(alignment - 1);
+}
+
+template <typename T>
+constexpr T roundDownPow2(const T value, const T alignment)
+{
+ ASSERT(gl::isPow2(alignment));
+ return value & ~(alignment - 1);
+}
+
+template <typename T>
+angle::CheckedNumeric<T> CheckedRoundUp(const T value, const T alignment)
+{
+ angle::CheckedNumeric<T> checkedValue(value);
+ angle::CheckedNumeric<T> checkedAlignment(alignment);
+ return roundUp(checkedValue, checkedAlignment);
+}
+
+inline constexpr unsigned int UnsignedCeilDivide(unsigned int value, unsigned int divisor)
+{
+ unsigned int divided = value / divisor;
+ return (divided + ((value % divisor == 0) ? 0 : 1));
+}
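+
+// Usage sketch (illustrative only, not part of the upstream sources):
+//
+//   rx::roundUp(13u, 8u);             // 16
+//   rx::roundUpPow2(13u, 8u);         // 16 (alignment must be a power of two)
+//   rx::UnsignedCeilDivide(13u, 4u);  // 4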
+
+#if defined(__has_builtin)
+# define ANGLE_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define ANGLE_HAS_BUILTIN(x) 0
+#endif
+
+#if defined(_MSC_VER)
+
+# define ANGLE_ROTL(x, y) _rotl(x, y)
+# define ANGLE_ROTL64(x, y) _rotl64(x, y)
+# define ANGLE_ROTR16(x, y) _rotr16(x, y)
+
+#elif defined(__clang__) && ANGLE_HAS_BUILTIN(__builtin_rotateleft32) && \
+ ANGLE_HAS_BUILTIN(__builtin_rotateleft64) && ANGLE_HAS_BUILTIN(__builtin_rotateright16)
+
+# define ANGLE_ROTL(x, y) __builtin_rotateleft32(x, y)
+# define ANGLE_ROTL64(x, y) __builtin_rotateleft64(x, y)
+# define ANGLE_ROTR16(x, y) __builtin_rotateright16(x, y)
+
+#else
+
+inline uint32_t RotL(uint32_t x, int8_t r)
+{
+ return (x << r) | (x >> (32 - r));
+}
+
+inline uint64_t RotL64(uint64_t x, int8_t r)
+{
+ return (x << r) | (x >> (64 - r));
+}
+
+inline uint16_t RotR16(uint16_t x, int8_t r)
+{
+ return (x >> r) | (x << (16 - r));
+}
+
+# define ANGLE_ROTL(x, y) ::rx::RotL(x, y)
+# define ANGLE_ROTL64(x, y) ::rx::RotL64(x, y)
+# define ANGLE_ROTR16(x, y) ::rx::RotR16(x, y)
+
+#endif // defined(_MSC_VER)
+
+constexpr unsigned int Log2(unsigned int bytes)
+{
+ return bytes == 1 ? 0 : (1 + Log2(bytes / 2));
+}
+} // namespace rx
+
+#endif // COMMON_MATHUTIL_H_
diff --git a/gfx/angle/checkout/src/common/matrix_utils.cpp b/gfx/angle/checkout/src/common/matrix_utils.cpp
new file mode 100644
index 0000000000..59ab4ca437
--- /dev/null
+++ b/gfx/angle/checkout/src/common/matrix_utils.cpp
@@ -0,0 +1,285 @@
+//
+// Copyright 2018 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// matrix_utils.cpp: Contains implementations for Mat4 methods.
+
+#include "common/matrix_utils.h"
+
+namespace angle
+{
+
+Mat4::Mat4() : Mat4(1.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 0.f, 1.f)
+{}
+
+Mat4::Mat4(const Matrix<float> generalMatrix) : Matrix(std::vector<float>(16, 0), 4, 4)
+{
+ unsigned int minCols = std::min((unsigned int)4, generalMatrix.columns());
+ unsigned int minRows = std::min((unsigned int)4, generalMatrix.rows());
+ for (unsigned int i = 0; i < minCols; i++)
+ {
+ for (unsigned int j = 0; j < minRows; j++)
+ {
+ mElements[j * minCols + i] = generalMatrix.at(j, i);
+ }
+ }
+}
+
+Mat4::Mat4(const std::vector<float> &elements) : Matrix(elements, 4) {}
+
+Mat4::Mat4(const float *elements) : Matrix(elements, 4) {}
+
+Mat4::Mat4(float m00,
+ float m01,
+ float m02,
+ float m03,
+ float m10,
+ float m11,
+ float m12,
+ float m13,
+ float m20,
+ float m21,
+ float m22,
+ float m23,
+ float m30,
+ float m31,
+ float m32,
+ float m33)
+ : Matrix(std::vector<float>(16, 0), 4, 4)
+{
+ mElements[0] = m00;
+ mElements[1] = m01;
+ mElements[2] = m02;
+ mElements[3] = m03;
+ mElements[4] = m10;
+ mElements[5] = m11;
+ mElements[6] = m12;
+ mElements[7] = m13;
+ mElements[8] = m20;
+ mElements[9] = m21;
+ mElements[10] = m22;
+ mElements[11] = m23;
+ mElements[12] = m30;
+ mElements[13] = m31;
+ mElements[14] = m32;
+ mElements[15] = m33;
+}
+
+// static
+Mat4 Mat4::Rotate(float angle, const Vector3 &axis)
+{
+ auto axis_normalized = axis.normalized();
+ float angle_radians = angle * (3.14159265358979323f / 180.0f);
+ float c = cos(angle_radians);
+ float ci = 1.f - c;
+ float s = sin(angle_radians);
+
+ float x = axis_normalized.x();
+ float y = axis_normalized.y();
+ float z = axis_normalized.z();
+
+ float x2 = x * x;
+ float y2 = y * y;
+ float z2 = z * z;
+
+ float xy = x * y;
+ float yz = y * z;
+ float zx = z * x;
+
+ float r00 = c + ci * x2;
+ float r01 = ci * xy + s * z;
+ float r02 = ci * zx - s * y;
+ float r03 = 0.f;
+
+ float r10 = ci * xy - s * z;
+ float r11 = c + ci * y2;
+ float r12 = ci * yz + s * x;
+ float r13 = 0.f;
+
+ float r20 = ci * zx + s * y;
+ float r21 = ci * yz - s * x;
+ float r22 = c + ci * z2;
+ float r23 = 0.f;
+
+ float r30 = 0.f;
+ float r31 = 0.f;
+ float r32 = 0.f;
+ float r33 = 1.f;
+
+ return Mat4(r00, r01, r02, r03, r10, r11, r12, r13, r20, r21, r22, r23, r30, r31, r32, r33);
+}
+
+// static
+Mat4 Mat4::Translate(const Vector3 &t)
+{
+ float r00 = 1.f;
+ float r01 = 0.f;
+ float r02 = 0.f;
+ float r03 = 0.f;
+
+ float r10 = 0.f;
+ float r11 = 1.f;
+ float r12 = 0.f;
+ float r13 = 0.f;
+
+ float r20 = 0.f;
+ float r21 = 0.f;
+ float r22 = 1.f;
+ float r23 = 0.f;
+
+ float r30 = t.x();
+ float r31 = t.y();
+ float r32 = t.z();
+ float r33 = 1.f;
+
+ return Mat4(r00, r01, r02, r03, r10, r11, r12, r13, r20, r21, r22, r23, r30, r31, r32, r33);
+}
+
+// static
+Mat4 Mat4::Scale(const Vector3 &s)
+{
+ float r00 = s.x();
+ float r01 = 0.f;
+ float r02 = 0.f;
+ float r03 = 0.f;
+
+ float r10 = 0.f;
+ float r11 = s.y();
+ float r12 = 0.f;
+ float r13 = 0.f;
+
+ float r20 = 0.f;
+ float r21 = 0.f;
+ float r22 = s.z();
+ float r23 = 0.f;
+
+ float r30 = 0.f;
+ float r31 = 0.f;
+ float r32 = 0.f;
+ float r33 = 1.f;
+
+ return Mat4(r00, r01, r02, r03, r10, r11, r12, r13, r20, r21, r22, r23, r30, r31, r32, r33);
+}
+
+// static
+Mat4 Mat4::Frustum(float l, float r, float b, float t, float n, float f)
+{
+ float nn = 2.f * n;
+ float fpn = f + n;
+ float fmn = f - n;
+ float tpb = t + b;
+ float tmb = t - b;
+ float rpl = r + l;
+ float rml = r - l;
+
+ float r00 = nn / rml;
+ float r01 = 0.f;
+ float r02 = 0.f;
+ float r03 = 0.f;
+
+ float r10 = 0.f;
+ float r11 = nn / tmb;
+ float r12 = 0.f;
+ float r13 = 0.f;
+
+ float r20 = rpl / rml;
+ float r21 = tpb / tmb;
+ float r22 = -fpn / fmn;
+ float r23 = -1.f;
+
+ float r30 = 0.f;
+ float r31 = 0.f;
+ float r32 = -nn * f / fmn;
+ float r33 = 0.f;
+
+ return Mat4(r00, r01, r02, r03, r10, r11, r12, r13, r20, r21, r22, r23, r30, r31, r32, r33);
+}
+
+// static
+Mat4 Mat4::Perspective(float fov, float aspectRatio, float n, float f)
+{
+ const float frustumHeight = tanf(static_cast<float>(fov / 360.0f * 3.14159265358979323)) * n;
+ const float frustumWidth = frustumHeight * aspectRatio;
+ return Frustum(-frustumWidth, frustumWidth, -frustumHeight, frustumHeight, n, f);
+}
+
+// static
+Mat4 Mat4::Ortho(float l, float r, float b, float t, float n, float f)
+{
+ float fpn = f + n;
+ float fmn = f - n;
+ float tpb = t + b;
+ float tmb = t - b;
+ float rpl = r + l;
+ float rml = r - l;
+
+ float r00 = 2.f / rml;
+ float r01 = 0.f;
+ float r02 = 0.f;
+ float r03 = 0.f;
+
+ float r10 = 0.f;
+ float r11 = 2.f / tmb;
+ float r12 = 0.f;
+ float r13 = 0.f;
+
+ float r20 = 0.f;
+ float r21 = 0.f;
+ float r22 = -2.f / fmn;
+ float r23 = 0.f;
+
+ float r30 = -rpl / rml;
+ float r31 = -tpb / tmb;
+ float r32 = -fpn / fmn;
+ float r33 = 1.f;
+
+ return Mat4(r00, r01, r02, r03, r10, r11, r12, r13, r20, r21, r22, r23, r30, r31, r32, r33);
+}
+
+Mat4 Mat4::product(const Mat4 &m)
+{
+ const float *a = mElements.data();
+ const float *b = m.mElements.data();
+
+ return Mat4(a[0] * b[0] + a[4] * b[1] + a[8] * b[2] + a[12] * b[3],
+ a[1] * b[0] + a[5] * b[1] + a[9] * b[2] + a[13] * b[3],
+ a[2] * b[0] + a[6] * b[1] + a[10] * b[2] + a[14] * b[3],
+ a[3] * b[0] + a[7] * b[1] + a[11] * b[2] + a[15] * b[3],
+
+ a[0] * b[4] + a[4] * b[5] + a[8] * b[6] + a[12] * b[7],
+ a[1] * b[4] + a[5] * b[5] + a[9] * b[6] + a[13] * b[7],
+ a[2] * b[4] + a[6] * b[5] + a[10] * b[6] + a[14] * b[7],
+ a[3] * b[4] + a[7] * b[5] + a[11] * b[6] + a[15] * b[7],
+
+ a[0] * b[8] + a[4] * b[9] + a[8] * b[10] + a[12] * b[11],
+ a[1] * b[8] + a[5] * b[9] + a[9] * b[10] + a[13] * b[11],
+ a[2] * b[8] + a[6] * b[9] + a[10] * b[10] + a[14] * b[11],
+ a[3] * b[8] + a[7] * b[9] + a[11] * b[10] + a[15] * b[11],
+
+ a[0] * b[12] + a[4] * b[13] + a[8] * b[14] + a[12] * b[15],
+ a[1] * b[12] + a[5] * b[13] + a[9] * b[14] + a[13] * b[15],
+ a[2] * b[12] + a[6] * b[13] + a[10] * b[14] + a[14] * b[15],
+ a[3] * b[12] + a[7] * b[13] + a[11] * b[14] + a[15] * b[15]);
+}
+
+Vector4 Mat4::product(const Vector4 &b)
+{
+ return Vector4(
+ mElements[0] * b.x() + mElements[4] * b.y() + mElements[8] * b.z() + mElements[12] * b.w(),
+ mElements[1] * b.x() + mElements[5] * b.y() + mElements[9] * b.z() + mElements[13] * b.w(),
+ mElements[2] * b.x() + mElements[6] * b.y() + mElements[10] * b.z() + mElements[14] * b.w(),
+ mElements[3] * b.x() + mElements[7] * b.y() + mElements[11] * b.z() +
+ mElements[15] * b.w());
+}
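+
+// Usage sketch (illustrative only, not part of the upstream sources); assumes the
+// Vector3/Vector4 constructors from common/vector_utils.h:
+//
+//   Mat4 t = Mat4::Translate(Vector3(1.f, 2.f, 3.f));
+//   Vector4 p = t.product(Vector4(0.f, 0.f, 0.f, 1.f));  // (1, 2, 3, 1)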
+
+void Mat4::dump()
+{
+ printf("[ %f %f %f %f ]\n", mElements[0], mElements[4], mElements[8], mElements[12]);
+ printf("[ %f %f %f %f ]\n", mElements[1], mElements[5], mElements[9], mElements[13]);
+ printf("[ %f %f %f %f ]\n", mElements[2], mElements[6], mElements[10], mElements[14]);
+ printf("[ %f %f %f %f ]\n", mElements[3], mElements[7], mElements[11], mElements[15]);
+}
+
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/matrix_utils.h b/gfx/angle/checkout/src/common/matrix_utils.h
new file mode 100644
index 0000000000..7cca7a9461
--- /dev/null
+++ b/gfx/angle/checkout/src/common/matrix_utils.h
@@ -0,0 +1,424 @@
+//
+// Copyright 2015 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Matrix:
+// Utility class implementing various matrix operations.
+//   Supports matrices with a minimum of 2 and a maximum of 4 rows/columns.
+//
+// TODO: Check if we can merge Matrix.h in sample_util with this and replace it with this
+// implementation.
+// TODO: Rename this file to Matrix.h once we remove Matrix.h in sample_util.
+
+#ifndef COMMON_MATRIX_UTILS_H_
+#define COMMON_MATRIX_UTILS_H_
+
+#include <vector>
+
+#include "common/debug.h"
+#include "common/mathutil.h"
+#include "common/vector_utils.h"
+
+namespace angle
+{
+
+template <typename T>
+class Matrix
+{
+ public:
+ Matrix(const std::vector<T> &elements, const unsigned int numRows, const unsigned int numCols)
+ : mElements(elements), mRows(numRows), mCols(numCols)
+ {
+ ASSERT(rows() >= 1 && rows() <= 4);
+ ASSERT(columns() >= 1 && columns() <= 4);
+ }
+
+ Matrix(const std::vector<T> &elements, const unsigned int size)
+ : mElements(elements), mRows(size), mCols(size)
+ {
+ ASSERT(rows() >= 1 && rows() <= 4);
+ ASSERT(columns() >= 1 && columns() <= 4);
+ }
+
+ Matrix(const T *elements, const unsigned int size) : mRows(size), mCols(size)
+ {
+ ASSERT(rows() >= 1 && rows() <= 4);
+ ASSERT(columns() >= 1 && columns() <= 4);
+ for (size_t i = 0; i < size * size; i++)
+ mElements.push_back(elements[i]);
+ }
+
+ const T &operator()(const unsigned int rowIndex, const unsigned int columnIndex) const
+ {
+ ASSERT(rowIndex < mRows);
+ ASSERT(columnIndex < mCols);
+ return mElements[rowIndex * columns() + columnIndex];
+ }
+
+ T &operator()(const unsigned int rowIndex, const unsigned int columnIndex)
+ {
+ ASSERT(rowIndex < mRows);
+ ASSERT(columnIndex < mCols);
+ return mElements[rowIndex * columns() + columnIndex];
+ }
+
+ const T &at(const unsigned int rowIndex, const unsigned int columnIndex) const
+ {
+ ASSERT(rowIndex < mRows);
+ ASSERT(columnIndex < mCols);
+ return operator()(rowIndex, columnIndex);
+ }
+
+ Matrix<T> operator*(const Matrix<T> &m)
+ {
+ ASSERT(columns() == m.rows());
+
+ unsigned int resultRows = rows();
+ unsigned int resultCols = m.columns();
+ Matrix<T> result(std::vector<T>(resultRows * resultCols), resultRows, resultCols);
+ for (unsigned int i = 0; i < resultRows; i++)
+ {
+ for (unsigned int j = 0; j < resultCols; j++)
+ {
+ T tmp = 0.0f;
+ for (unsigned int k = 0; k < columns(); k++)
+ tmp += at(i, k) * m(k, j);
+ result(i, j) = tmp;
+ }
+ }
+
+ return result;
+ }
+
+ void operator*=(const Matrix<T> &m)
+ {
+ ASSERT(columns() == m.rows());
+ Matrix<T> res = (*this) * m;
+ size_t numElts = res.elements().size();
+ mElements.resize(numElts);
+        memcpy(mElements.data(), res.data(), numElts * sizeof(T));
+ }
+
+ bool operator==(const Matrix<T> &m) const
+ {
+ ASSERT(columns() == m.columns());
+ ASSERT(rows() == m.rows());
+ return mElements == m.elements();
+ }
+
+ bool operator!=(const Matrix<T> &m) const { return !(mElements == m.elements()); }
+
+ bool nearlyEqual(T epsilon, const Matrix<T> &m) const
+ {
+ ASSERT(columns() == m.columns());
+ ASSERT(rows() == m.rows());
+ const auto &otherElts = m.elements();
+ for (size_t i = 0; i < otherElts.size(); i++)
+ {
+ if ((mElements[i] - otherElts[i] > epsilon) || (otherElts[i] - mElements[i] > epsilon))
+ return false;
+ }
+ return true;
+ }
+
+ unsigned int size() const
+ {
+ ASSERT(rows() == columns());
+ return rows();
+ }
+
+ unsigned int rows() const { return mRows; }
+
+ unsigned int columns() const { return mCols; }
+
+ std::vector<T> elements() const { return mElements; }
+ T *data() { return mElements.data(); }
+ const T *constData() const { return mElements.data(); }
+
+ Matrix<T> compMult(const Matrix<T> &mat1) const
+ {
+ Matrix result(std::vector<T>(mElements.size()), rows(), columns());
+ for (unsigned int i = 0; i < rows(); i++)
+ {
+ for (unsigned int j = 0; j < columns(); j++)
+ {
+ T lhs = at(i, j);
+ T rhs = mat1(i, j);
+ result(i, j) = rhs * lhs;
+ }
+ }
+
+ return result;
+ }
+
+ Matrix<T> outerProduct(const Matrix<T> &mat1) const
+ {
+ unsigned int cols = mat1.columns();
+ Matrix result(std::vector<T>(rows() * cols), rows(), cols);
+ for (unsigned int i = 0; i < rows(); i++)
+ for (unsigned int j = 0; j < cols; j++)
+ result(i, j) = at(i, 0) * mat1(0, j);
+
+ return result;
+ }
+
+ Matrix<T> transpose() const
+ {
+ Matrix result(std::vector<T>(mElements.size()), columns(), rows());
+ for (unsigned int i = 0; i < columns(); i++)
+ for (unsigned int j = 0; j < rows(); j++)
+ result(i, j) = at(j, i);
+
+ return result;
+ }
+
+ T determinant() const
+ {
+ ASSERT(rows() == columns());
+
+ switch (size())
+ {
+ case 2:
+ return at(0, 0) * at(1, 1) - at(0, 1) * at(1, 0);
+
+ case 3:
+ return at(0, 0) * at(1, 1) * at(2, 2) + at(0, 1) * at(1, 2) * at(2, 0) +
+ at(0, 2) * at(1, 0) * at(2, 1) - at(0, 2) * at(1, 1) * at(2, 0) -
+ at(0, 1) * at(1, 0) * at(2, 2) - at(0, 0) * at(1, 2) * at(2, 1);
+
+ case 4:
+ {
+ const T minorMatrices[4][3 * 3] = {{
+ at(1, 1),
+ at(2, 1),
+ at(3, 1),
+ at(1, 2),
+ at(2, 2),
+ at(3, 2),
+ at(1, 3),
+ at(2, 3),
+ at(3, 3),
+ },
+ {
+ at(1, 0),
+ at(2, 0),
+ at(3, 0),
+ at(1, 2),
+ at(2, 2),
+ at(3, 2),
+ at(1, 3),
+ at(2, 3),
+ at(3, 3),
+ },
+ {
+ at(1, 0),
+ at(2, 0),
+ at(3, 0),
+ at(1, 1),
+ at(2, 1),
+ at(3, 1),
+ at(1, 3),
+ at(2, 3),
+ at(3, 3),
+ },
+ {
+ at(1, 0),
+ at(2, 0),
+ at(3, 0),
+ at(1, 1),
+ at(2, 1),
+ at(3, 1),
+ at(1, 2),
+ at(2, 2),
+ at(3, 2),
+ }};
+ return at(0, 0) * Matrix<T>(minorMatrices[0], 3).determinant() -
+ at(0, 1) * Matrix<T>(minorMatrices[1], 3).determinant() +
+ at(0, 2) * Matrix<T>(minorMatrices[2], 3).determinant() -
+ at(0, 3) * Matrix<T>(minorMatrices[3], 3).determinant();
+ }
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ return T();
+ }
+
+ Matrix<T> inverse() const
+ {
+ ASSERT(rows() == columns());
+
+ Matrix<T> cof(std::vector<T>(mElements.size()), rows(), columns());
+ switch (size())
+ {
+ case 2:
+ cof(0, 0) = at(1, 1);
+ cof(0, 1) = -at(1, 0);
+ cof(1, 0) = -at(0, 1);
+ cof(1, 1) = at(0, 0);
+ break;
+
+ case 3:
+ cof(0, 0) = at(1, 1) * at(2, 2) - at(2, 1) * at(1, 2);
+ cof(0, 1) = -(at(1, 0) * at(2, 2) - at(2, 0) * at(1, 2));
+ cof(0, 2) = at(1, 0) * at(2, 1) - at(2, 0) * at(1, 1);
+ cof(1, 0) = -(at(0, 1) * at(2, 2) - at(2, 1) * at(0, 2));
+ cof(1, 1) = at(0, 0) * at(2, 2) - at(2, 0) * at(0, 2);
+ cof(1, 2) = -(at(0, 0) * at(2, 1) - at(2, 0) * at(0, 1));
+ cof(2, 0) = at(0, 1) * at(1, 2) - at(1, 1) * at(0, 2);
+ cof(2, 1) = -(at(0, 0) * at(1, 2) - at(1, 0) * at(0, 2));
+ cof(2, 2) = at(0, 0) * at(1, 1) - at(1, 0) * at(0, 1);
+ break;
+
+ case 4:
+ cof(0, 0) = at(1, 1) * at(2, 2) * at(3, 3) + at(2, 1) * at(3, 2) * at(1, 3) +
+ at(3, 1) * at(1, 2) * at(2, 3) - at(1, 1) * at(3, 2) * at(2, 3) -
+ at(2, 1) * at(1, 2) * at(3, 3) - at(3, 1) * at(2, 2) * at(1, 3);
+ cof(0, 1) = -(at(1, 0) * at(2, 2) * at(3, 3) + at(2, 0) * at(3, 2) * at(1, 3) +
+ at(3, 0) * at(1, 2) * at(2, 3) - at(1, 0) * at(3, 2) * at(2, 3) -
+ at(2, 0) * at(1, 2) * at(3, 3) - at(3, 0) * at(2, 2) * at(1, 3));
+ cof(0, 2) = at(1, 0) * at(2, 1) * at(3, 3) + at(2, 0) * at(3, 1) * at(1, 3) +
+ at(3, 0) * at(1, 1) * at(2, 3) - at(1, 0) * at(3, 1) * at(2, 3) -
+ at(2, 0) * at(1, 1) * at(3, 3) - at(3, 0) * at(2, 1) * at(1, 3);
+ cof(0, 3) = -(at(1, 0) * at(2, 1) * at(3, 2) + at(2, 0) * at(3, 1) * at(1, 2) +
+ at(3, 0) * at(1, 1) * at(2, 2) - at(1, 0) * at(3, 1) * at(2, 2) -
+ at(2, 0) * at(1, 1) * at(3, 2) - at(3, 0) * at(2, 1) * at(1, 2));
+ cof(1, 0) = -(at(0, 1) * at(2, 2) * at(3, 3) + at(2, 1) * at(3, 2) * at(0, 3) +
+ at(3, 1) * at(0, 2) * at(2, 3) - at(0, 1) * at(3, 2) * at(2, 3) -
+ at(2, 1) * at(0, 2) * at(3, 3) - at(3, 1) * at(2, 2) * at(0, 3));
+ cof(1, 1) = at(0, 0) * at(2, 2) * at(3, 3) + at(2, 0) * at(3, 2) * at(0, 3) +
+ at(3, 0) * at(0, 2) * at(2, 3) - at(0, 0) * at(3, 2) * at(2, 3) -
+ at(2, 0) * at(0, 2) * at(3, 3) - at(3, 0) * at(2, 2) * at(0, 3);
+ cof(1, 2) = -(at(0, 0) * at(2, 1) * at(3, 3) + at(2, 0) * at(3, 1) * at(0, 3) +
+ at(3, 0) * at(0, 1) * at(2, 3) - at(0, 0) * at(3, 1) * at(2, 3) -
+ at(2, 0) * at(0, 1) * at(3, 3) - at(3, 0) * at(2, 1) * at(0, 3));
+ cof(1, 3) = at(0, 0) * at(2, 1) * at(3, 2) + at(2, 0) * at(3, 1) * at(0, 2) +
+ at(3, 0) * at(0, 1) * at(2, 2) - at(0, 0) * at(3, 1) * at(2, 2) -
+ at(2, 0) * at(0, 1) * at(3, 2) - at(3, 0) * at(2, 1) * at(0, 2);
+ cof(2, 0) = at(0, 1) * at(1, 2) * at(3, 3) + at(1, 1) * at(3, 2) * at(0, 3) +
+ at(3, 1) * at(0, 2) * at(1, 3) - at(0, 1) * at(3, 2) * at(1, 3) -
+ at(1, 1) * at(0, 2) * at(3, 3) - at(3, 1) * at(1, 2) * at(0, 3);
+ cof(2, 1) = -(at(0, 0) * at(1, 2) * at(3, 3) + at(1, 0) * at(3, 2) * at(0, 3) +
+ at(3, 0) * at(0, 2) * at(1, 3) - at(0, 0) * at(3, 2) * at(1, 3) -
+ at(1, 0) * at(0, 2) * at(3, 3) - at(3, 0) * at(1, 2) * at(0, 3));
+ cof(2, 2) = at(0, 0) * at(1, 1) * at(3, 3) + at(1, 0) * at(3, 1) * at(0, 3) +
+ at(3, 0) * at(0, 1) * at(1, 3) - at(0, 0) * at(3, 1) * at(1, 3) -
+ at(1, 0) * at(0, 1) * at(3, 3) - at(3, 0) * at(1, 1) * at(0, 3);
+ cof(2, 3) = -(at(0, 0) * at(1, 1) * at(3, 2) + at(1, 0) * at(3, 1) * at(0, 2) +
+ at(3, 0) * at(0, 1) * at(1, 2) - at(0, 0) * at(3, 1) * at(1, 2) -
+ at(1, 0) * at(0, 1) * at(3, 2) - at(3, 0) * at(1, 1) * at(0, 2));
+ cof(3, 0) = -(at(0, 1) * at(1, 2) * at(2, 3) + at(1, 1) * at(2, 2) * at(0, 3) +
+ at(2, 1) * at(0, 2) * at(1, 3) - at(0, 1) * at(2, 2) * at(1, 3) -
+ at(1, 1) * at(0, 2) * at(2, 3) - at(2, 1) * at(1, 2) * at(0, 3));
+ cof(3, 1) = at(0, 0) * at(1, 2) * at(2, 3) + at(1, 0) * at(2, 2) * at(0, 3) +
+ at(2, 0) * at(0, 2) * at(1, 3) - at(0, 0) * at(2, 2) * at(1, 3) -
+ at(1, 0) * at(0, 2) * at(2, 3) - at(2, 0) * at(1, 2) * at(0, 3);
+ cof(3, 2) = -(at(0, 0) * at(1, 1) * at(2, 3) + at(1, 0) * at(2, 1) * at(0, 3) +
+ at(2, 0) * at(0, 1) * at(1, 3) - at(0, 0) * at(2, 1) * at(1, 3) -
+ at(1, 0) * at(0, 1) * at(2, 3) - at(2, 0) * at(1, 1) * at(0, 3));
+ cof(3, 3) = at(0, 0) * at(1, 1) * at(2, 2) + at(1, 0) * at(2, 1) * at(0, 2) +
+ at(2, 0) * at(0, 1) * at(1, 2) - at(0, 0) * at(2, 1) * at(1, 2) -
+ at(1, 0) * at(0, 1) * at(2, 2) - at(2, 0) * at(1, 1) * at(0, 2);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // The inverse of A is the transpose of the cofactor matrix times the reciprocal of the
+ // determinant of A.
+ Matrix<T> adjugateMatrix(cof.transpose());
+ T det = determinant();
+ Matrix<T> result(std::vector<T>(mElements.size()), rows(), columns());
+ for (unsigned int i = 0; i < rows(); i++)
+ for (unsigned int j = 0; j < columns(); j++)
+ result(i, j) = (det != static_cast<T>(0)) ? adjugateMatrix(i, j) / det : T();
+
+ return result;
+ }
+
+ void setToIdentity()
+ {
+ ASSERT(rows() == columns());
+
+ const auto one = T(1);
+ const auto zero = T(0);
+
+ for (auto &e : mElements)
+ e = zero;
+
+ for (unsigned int i = 0; i < rows(); ++i)
+ {
+ const auto pos = i * columns() + (i % columns());
+ mElements[pos] = one;
+ }
+ }
+
+ template <unsigned int Size>
+ static void setToIdentity(T (&matrix)[Size])
+ {
+ static_assert(gl::iSquareRoot<Size>() != 0, "Matrix is not square.");
+
+ const auto cols = gl::iSquareRoot<Size>();
+ const auto one = T(1);
+ const auto zero = T(0);
+
+ for (auto &e : matrix)
+ e = zero;
+
+ for (unsigned int i = 0; i < cols; ++i)
+ {
+ const auto pos = i * cols + (i % cols);
+ matrix[pos] = one;
+ }
+ }
+
+ protected:
+ std::vector<T> mElements;
+ unsigned int mRows;
+ unsigned int mCols;
+};
+
+class Mat4 : public Matrix<float>
+{
+ public:
+ Mat4();
+ Mat4(const Matrix<float> generalMatrix);
+ Mat4(const std::vector<float> &elements);
+ Mat4(const float *elements);
+ Mat4(float m00,
+ float m01,
+ float m02,
+ float m03,
+ float m10,
+ float m11,
+ float m12,
+ float m13,
+ float m20,
+ float m21,
+ float m22,
+ float m23,
+ float m30,
+ float m31,
+ float m32,
+ float m33);
+
+ static Mat4 Rotate(float angle, const Vector3 &axis);
+ static Mat4 Translate(const Vector3 &t);
+ static Mat4 Scale(const Vector3 &s);
+ static Mat4 Frustum(float l, float r, float b, float t, float n, float f);
+ static Mat4 Perspective(float fov, float aspectRatio, float n, float f);
+ static Mat4 Ortho(float l, float r, float b, float t, float n, float f);
+
+ Mat4 product(const Mat4 &m);
+ Vector4 product(const Vector4 &b);
+ void dump();
+};
+
+} // namespace angle
+
+#endif // COMMON_MATRIX_UTILS_H_
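The sketch below shows one way the Matrix and Mat4 helpers declared above might be exercised. It is illustrative only: the main() scaffolding and the chosen values are not part of ANGLE, and it assumes matrix_utils.cpp and the vector utilities are compiled and linked alongside it.

#include "common/matrix_utils.h"

#include <cstdio>

int main()
{
    // 2x2 matrix given in row-major order (operator() indexes rows, then columns).
    angle::Matrix<float> m({2.0f, 0.0f, 0.0f, 4.0f}, 2);
    angle::Matrix<float> inv = m.inverse();

    // m * m^-1 should be within epsilon of the identity.
    angle::Matrix<float> identity({1.0f, 0.0f, 0.0f, 1.0f}, 2);
    printf("inverse ok: %d\n", (m * inv).nearlyEqual(1.0e-6f, identity) ? 1 : 0);

    // Mat4 adds the usual transform builders on top of Matrix<float>.
    angle::Mat4 t = angle::Mat4::Translate(angle::Vector3(1.0f, 2.0f, 3.0f));
    t.dump();
    return 0;
}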
diff --git a/gfx/angle/checkout/src/common/platform.h b/gfx/angle/checkout/src/common/platform.h
new file mode 100644
index 0000000000..77267ab680
--- /dev/null
+++ b/gfx/angle/checkout/src/common/platform.h
@@ -0,0 +1,209 @@
+//
+// Copyright 2014 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// platform.h: Operating system specific includes and defines.
+
+#ifndef COMMON_PLATFORM_H_
+#define COMMON_PLATFORM_H_
+
+#if defined(_WIN32)
+# define ANGLE_PLATFORM_WINDOWS 1
+#elif defined(__Fuchsia__)
+# define ANGLE_PLATFORM_FUCHSIA 1
+# define ANGLE_PLATFORM_POSIX 1
+#elif defined(__APPLE__)
+# define ANGLE_PLATFORM_APPLE 1
+# define ANGLE_PLATFORM_POSIX 1
+#elif defined(ANDROID)
+# define ANGLE_PLATFORM_ANDROID 1
+# define ANGLE_PLATFORM_POSIX 1
+#elif defined(__ggp__)
+# define ANGLE_PLATFORM_GGP 1
+# define ANGLE_PLATFORM_POSIX 1
+#elif defined(__linux__) || defined(EMSCRIPTEN)
+# define ANGLE_PLATFORM_LINUX 1
+# define ANGLE_PLATFORM_POSIX 1
+#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || \
+ defined(__DragonFly__) || defined(__sun) || defined(__GLIBC__) || defined(__GNU__) || \
+ defined(__QNX__) || defined(__Fuchsia__) || defined(__HAIKU__)
+# define ANGLE_PLATFORM_POSIX 1
+#else
+# error Unsupported platform.
+#endif
+
+#ifdef ANGLE_PLATFORM_WINDOWS
+# ifndef STRICT
+# define STRICT 1
+# endif
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN 1
+# endif
+# ifndef NOMINMAX
+# define NOMINMAX 1
+# endif
+
+# include <intrin.h>
+
+# if defined(WINAPI_FAMILY) && (WINAPI_FAMILY != WINAPI_FAMILY_DESKTOP_APP)
+# define ANGLE_ENABLE_WINDOWS_UWP 1
+# endif
+
+# if defined(ANGLE_ENABLE_D3D9)
+# include <d3d9.h>
+# include <d3dcompiler.h>
+# endif
+
+// Include D3D11 headers when OpenGL is enabled on Windows for interop extensions.
+# if defined(ANGLE_ENABLE_D3D11) || defined(ANGLE_ENABLE_OPENGL)
+# include <d3d10_1.h>
+# include <d3d11.h>
+# include <d3d11_3.h>
+# include <d3d11on12.h>
+# include <d3d12.h>
+# include <d3dcompiler.h>
+# include <dxgi.h>
+# include <dxgi1_2.h>
+# include <dxgi1_4.h>
+# endif
+
+# if defined(ANGLE_ENABLE_D3D9) || defined(ANGLE_ENABLE_D3D11)
+# include <wrl.h>
+# endif
+
+# if defined(ANGLE_ENABLE_WINDOWS_UWP)
+# include <dxgi1_3.h>
+# if defined(_DEBUG)
+# include <DXProgrammableCapture.h>
+# include <dxgidebug.h>
+# endif
+# endif
+
+// Include <windows.h> to ensure that test-related files can be built when building the
+// Vulkan-only ANGLE backend on Windows.
+# if defined(ANGLE_ENABLE_VULKAN)
+# include <windows.h>
+# endif
+
+// Macros 'near', 'far', 'NEAR' and 'FAR' are defined by 'shared/minwindef.h' in the Windows SDK.
+// Macros 'near' and 'far' are empty. They are not used by other Windows headers and are undefined
+// here to avoid identifier conflicts. Macros 'NEAR' and 'FAR' contain 'near' and 'far'. They are
+// used by other Windows headers and are cleared here to avoid compilation errors.
+# undef near
+# undef far
+# undef NEAR
+# undef FAR
+# define NEAR
+# define FAR
+#endif
+
+#if defined(_MSC_VER) && !defined(_M_ARM) && !defined(_M_ARM64)
+# include <intrin.h>
+# define ANGLE_USE_SSE
+#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
+# include <x86intrin.h>
+# define ANGLE_USE_SSE
+#endif
+
+// MIPS and ARM devices need to include <stddef.h> for size_t.
+#if defined(__mips__) || defined(__arm__) || defined(__aarch64__)
+# include <stddef.h>
+#endif
+
+// The MemoryBarrier function name collides with a macro under Windows
+// We will undef the macro so that the function name does not get replaced
+#undef MemoryBarrier
+
+// Macro for hinting that an expression is likely to be true/false.
+#if !defined(ANGLE_LIKELY) || !defined(ANGLE_UNLIKELY)
+# if defined(__GNUC__) || defined(__clang__)
+# define ANGLE_LIKELY(x) __builtin_expect(!!(x), 1)
+# define ANGLE_UNLIKELY(x) __builtin_expect(!!(x), 0)
+# else
+# define ANGLE_LIKELY(x) (x)
+# define ANGLE_UNLIKELY(x) (x)
+# endif // defined(__GNUC__) || defined(__clang__)
+#endif // !defined(ANGLE_LIKELY) || !defined(ANGLE_UNLIKELY)
+
+#ifdef ANGLE_PLATFORM_APPLE
+# include <TargetConditionals.h>
+# if TARGET_OS_OSX
+# define ANGLE_PLATFORM_MACOS 1
+# elif TARGET_OS_IPHONE
+# define ANGLE_PLATFORM_IOS 1
+# if TARGET_OS_SIMULATOR
+# define ANGLE_PLATFORM_IOS_SIMULATOR 1
+# endif
+# if TARGET_OS_MACCATALYST
+# define ANGLE_PLATFORM_MACCATALYST 1
+# endif
+# elif TARGET_OS_WATCH
+# define ANGLE_PLATFORM_WATCHOS 1
+# if TARGET_OS_SIMULATOR
+# define ANGLE_PLATFORM_IOS_SIMULATOR 1
+# endif
+# elif TARGET_OS_TV
+# define ANGLE_PLATFORM_APPLETV 1
+# if TARGET_OS_SIMULATOR
+# define ANGLE_PLATFORM_IOS_SIMULATOR 1
+# endif
+# endif
+# // This might be useful globally. At the moment it is used
+# // to differentiate MacCatalyst on Intel and Apple Silicon.
+# if defined(__arm64__) || defined(__aarch64__)
+# define ANGLE_CPU_ARM64 1
+# endif
+# // EAGL should be enabled on iOS, but not Mac Catalyst unless it is running on Apple Silicon.
+# if (defined(ANGLE_PLATFORM_IOS) && !defined(ANGLE_PLATFORM_MACCATALYST)) || \
+ (defined(ANGLE_PLATFORM_MACCATALYST) && defined(ANGLE_CPU_ARM64))
+# define ANGLE_ENABLE_EAGL
+# endif
+# // Identify Metal API >= what shipped on macOS Catalina.
+# if (defined(ANGLE_PLATFORM_MACOS) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500) || \
+ (defined(ANGLE_PLATFORM_IOS) && __IPHONE_OS_VERSION_MAX_ALLOWED >= 130000)
+# define ANGLE_WITH_MODERN_METAL_API 1
+# endif
+#endif
+
+// Define ANGLE_WITH_ASAN macro.
+#if defined(__has_feature)
+# if __has_feature(address_sanitizer)
+# define ANGLE_WITH_ASAN 1
+# endif
+#endif
+
+// Define ANGLE_WITH_MSAN macro.
+#if defined(__has_feature)
+# if __has_feature(memory_sanitizer)
+# define ANGLE_WITH_MSAN 1
+# endif
+#endif
+
+// Define ANGLE_WITH_TSAN macro.
+#if defined(__has_feature)
+# if __has_feature(thread_sanitizer)
+# define ANGLE_WITH_TSAN 1
+# endif
+#endif
+
+// Define ANGLE_WITH_UBSAN macro.
+#if defined(__has_feature)
+# if __has_feature(undefined_behavior_sanitizer)
+# define ANGLE_WITH_UBSAN 1
+# endif
+#endif
+
+#if defined(ANGLE_WITH_ASAN) || defined(ANGLE_WITH_TSAN) || defined(ANGLE_WITH_UBSAN)
+# define ANGLE_WITH_SANITIZER 1
+#endif // defined(ANGLE_WITH_ASAN) || defined(ANGLE_WITH_TSAN) || defined(ANGLE_WITH_UBSAN)
+
+#include <cstdint>
+#if INTPTR_MAX == INT64_MAX
+# define ANGLE_IS_64_BIT_CPU 1
+#else
+# define ANGLE_IS_32_BIT_CPU 1
+#endif
+
+#endif // COMMON_PLATFORM_H_
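A minimal sketch of how the platform defines and branch-hint macros above are typically consumed. validateInput is a made-up function used only for illustration, not an ANGLE API.

#include "common/platform.h"

#include <cstdio>

static bool validateInput(int value)
{
    // The error path is expected to be rare, so hint the compiler accordingly.
    if (ANGLE_UNLIKELY(value < 0))
    {
        fprintf(stderr, "negative value rejected\n");
        return false;
    }
    return true;
}

int main()
{
#if defined(ANGLE_PLATFORM_WINDOWS)
    printf("running on Windows\n");
#elif defined(ANGLE_PLATFORM_POSIX)
    printf("running on a POSIX platform\n");
#endif
    return validateInput(42) ? 0 : 1;
}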
diff --git a/gfx/angle/checkout/src/common/spirv/spirv_types.h b/gfx/angle/checkout/src/common/spirv/spirv_types.h
new file mode 100644
index 0000000000..faf2174481
--- /dev/null
+++ b/gfx/angle/checkout/src/common/spirv/spirv_types.h
@@ -0,0 +1,133 @@
+//
+// Copyright 2021 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// spirv_types.h:
+// Strong types for SPIR-V Ids to prevent mistakes when using the builder and parser APIs.
+//
+
+#ifndef COMMON_SPIRV_TYPES_H_
+#define COMMON_SPIRV_TYPES_H_
+
+#include "common/FastVector.h"
+
+#include <vector>
+
+namespace angle
+{
+namespace spirv
+{
+template <typename Helper>
+class BoxedUint32
+{
+ public:
+ BoxedUint32() : mValue{0} {}
+ explicit BoxedUint32(uint32_t value) : mValue{value} {}
+ template <typename T>
+ T as() const
+ {
+ return T{mValue};
+ }
+ BoxedUint32(const BoxedUint32 &other) = default;
+ BoxedUint32 &operator=(const BoxedUint32 &other) = default;
+ operator uint32_t() const { return mValue.value; }
+ bool operator==(const BoxedUint32 &other) const { return mValue.value == other.mValue.value; }
+ // Applicable to ids, which cannot be 0.
+ bool valid() const { return static_cast<bool>(mValue.value); }
+
+ private:
+ Helper mValue;
+};
+
+struct IdRefHelper
+{
+ uint32_t value;
+};
+struct LiteralIntegerHelper
+{
+ uint32_t value;
+};
+
+using IdRef = BoxedUint32<IdRefHelper>;
+
+template <>
+inline BoxedUint32<IdRefHelper>::operator uint32_t() const
+{
+ ASSERT(valid());
+ return mValue.value;
+}
+
+// IdResult, IdResultType, IdMemorySemantics and IdScope are all translated as IdRef. This makes
+// the type verification weaker, but stops the API from becoming tediously verbose.
+using IdResult = IdRef;
+using IdResultType = IdRef;
+using IdMemorySemantics = IdRef;
+using IdScope = IdRef;
+using LiteralInteger = BoxedUint32<LiteralIntegerHelper>;
+using LiteralString = const char *;
+// Note: In ANGLE's use cases, all literals fit in 32 bits.
+using LiteralContextDependentNumber = LiteralInteger;
+// TODO(syoussefi): To be made stronger when generating SPIR-V from the translator.
+// http://anglebug.com/4889
+using LiteralExtInstInteger = LiteralInteger;
+
+struct PairLiteralIntegerIdRef
+{
+ LiteralInteger literal;
+ IdRef id;
+};
+
+struct PairIdRefLiteralInteger
+{
+ IdRef id;
+ LiteralInteger literal;
+};
+
+struct PairIdRefIdRef
+{
+ IdRef id1;
+ IdRef id2;
+};
+
+// Some instructions need 4 components. The drivers uniform struct in ANGLE has 8 fields. A value
+// of 8 means almost no instruction would end up making dynamic allocations. Notable exceptions are
+// user-defined structs/blocks and OpEntryPoint.
+constexpr size_t kFastVectorSize = 8;
+
+template <typename T>
+using FastVectorHelper = angle::FastVector<T, kFastVectorSize>;
+
+using IdRefList = FastVectorHelper<IdRef>;
+using LiteralIntegerList = FastVectorHelper<LiteralInteger>;
+using PairLiteralIntegerIdRefList = FastVectorHelper<PairLiteralIntegerIdRef>;
+using PairIdRefLiteralIntegerList = FastVectorHelper<PairIdRefLiteralInteger>;
+using PairIdRefIdRefList = FastVectorHelper<PairIdRefIdRef>;
+
+// Id 0 is invalid in SPIR-V.
+constexpr uint32_t kMinValidId = 1;
+
+// The SPIR-V blob is a sequence of uint32_t's
+using Blob = std::vector<uint32_t>;
+
+// Format of the SPIR-V header.
+// SPIR-V 1.0 Table 1: First Words of Physical Layout
+enum HeaderIndex
+{
+ kHeaderIndexMagic = 0,
+ kHeaderIndexVersion = 1,
+ kHeaderIndexGenerator = 2,
+ kHeaderIndexIndexBound = 3,
+ kHeaderIndexSchema = 4,
+ kHeaderIndexInstructions = 5,
+};
+
+// Returns whether SPIR-V is valid. Useful for ASSERTs. Automatically generates a warning if
+// SPIR-V is not valid.
+bool Validate(const Blob &blob);
+void Print(const Blob &blob);
+
+} // namespace spirv
+} // namespace angle
+
+#endif // COMMON_SPIRV_TYPES_H_
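A brief sketch of the strong Id types above. writeInstruction is a hypothetical consumer; it exists only to illustrate that ids and literals cannot be swapped silently even though both wrap a uint32_t.

#include "common/spirv/spirv_types.h"

#include <cstdint>

namespace sketch
{
using namespace angle::spirv;

void writeInstruction(IdResult /*result*/, IdResultType /*type*/, const IdRefList & /*args*/) {}

void example()
{
    IdResultType floatTypeId(2);
    IdResult resultId(7);
    IdRef operandId(5);
    LiteralInteger location(3);

    IdRefList args;
    args.push_back(operandId);

    // Compiles: every argument is the expected strong type.
    writeInstruction(resultId, floatTypeId, args);

    // Would not compile: a LiteralInteger is not an IdRef, since the uint32_t
    // constructor is explicit.
    // writeInstruction(resultId, location, args);

    // Converting back to a raw SPIR-V word is explicit in intent; for IdRef the
    // conversion also asserts the id is valid (non-zero).
    uint32_t word = operandId;
    (void)word;
    (void)location;
}
}  // namespace sketch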
diff --git a/gfx/angle/checkout/src/common/string_utils.cpp b/gfx/angle/checkout/src/common/string_utils.cpp
new file mode 100644
index 0000000000..192d82917c
--- /dev/null
+++ b/gfx/angle/checkout/src/common/string_utils.cpp
@@ -0,0 +1,357 @@
+//
+// Copyright 2015 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// string_utils:
+// String helper functions.
+//
+
+#include "common/string_utils.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+#include <cctype>
+#include <fstream>
+#include <sstream>
+
+#include "common/platform.h"
+#include "common/system_utils.h"
+
+namespace
+{
+
+bool EndsWithSuffix(const char *str,
+ const size_t strLen,
+ const char *suffix,
+ const size_t suffixLen)
+{
+ return suffixLen <= strLen && strncmp(str + strLen - suffixLen, suffix, suffixLen) == 0;
+}
+
+} // anonymous namespace
+
+namespace angle
+{
+
+const char kWhitespaceASCII[] = " \f\n\r\t\v";
+
+std::vector<std::string> SplitString(const std::string &input,
+ const std::string &delimiters,
+ WhitespaceHandling whitespace,
+ SplitResult resultType)
+{
+ std::vector<std::string> result;
+ if (input.empty())
+ {
+ return result;
+ }
+
+ std::string::size_type start = 0;
+ while (start != std::string::npos)
+ {
+ auto end = input.find_first_of(delimiters, start);
+
+ std::string piece;
+ if (end == std::string::npos)
+ {
+ piece = input.substr(start);
+ start = std::string::npos;
+ }
+ else
+ {
+ piece = input.substr(start, end - start);
+ start = end + 1;
+ }
+
+ if (whitespace == TRIM_WHITESPACE)
+ {
+ piece = TrimString(piece, kWhitespaceASCII);
+ }
+
+ if (resultType == SPLIT_WANT_ALL || !piece.empty())
+ {
+ result.push_back(std::move(piece));
+ }
+ }
+
+ return result;
+}
+
+void SplitStringAlongWhitespace(const std::string &input, std::vector<std::string> *tokensOut)
+{
+
+ std::istringstream stream(input);
+ std::string line;
+
+ while (std::getline(stream, line))
+ {
+ size_t prev = 0, pos;
+ while ((pos = line.find_first_of(kWhitespaceASCII, prev)) != std::string::npos)
+ {
+ if (pos > prev)
+ tokensOut->push_back(line.substr(prev, pos - prev));
+ prev = pos + 1;
+ }
+ if (prev < line.length())
+ tokensOut->push_back(line.substr(prev, std::string::npos));
+ }
+}
+
+std::string TrimString(const std::string &input, const std::string &trimChars)
+{
+ auto begin = input.find_first_not_of(trimChars);
+ if (begin == std::string::npos)
+ {
+ return "";
+ }
+
+ std::string::size_type end = input.find_last_not_of(trimChars);
+ if (end == std::string::npos)
+ {
+ return input.substr(begin);
+ }
+
+ return input.substr(begin, end - begin + 1);
+}
+
+std::string GetPrefix(const std::string &input, size_t offset, const char *delimiter)
+{
+ size_t match = input.find(delimiter, offset);
+ if (match == std::string::npos)
+ {
+ return input.substr(offset);
+ }
+ return input.substr(offset, match - offset);
+}
+
+std::string GetPrefix(const std::string &input, size_t offset, char delimiter)
+{
+ size_t match = input.find(delimiter, offset);
+ if (match == std::string::npos)
+ {
+ return input.substr(offset);
+ }
+ return input.substr(offset, match - offset);
+}
+
+bool HexStringToUInt(const std::string &input, unsigned int *uintOut)
+{
+ unsigned int offset = 0;
+
+ if (input.size() >= 2 && input[0] == '0' && input[1] == 'x')
+ {
+ offset = 2u;
+ }
+
+ // Simple validity check
+ if (input.find_first_not_of("0123456789ABCDEFabcdef", offset) != std::string::npos)
+ {
+ return false;
+ }
+
+ std::stringstream inStream(input);
+ inStream >> std::hex >> *uintOut;
+ return !inStream.fail();
+}
+
+bool ReadFileToString(const std::string &path, std::string *stringOut)
+{
+ std::ifstream inFile(path.c_str());
+ if (inFile.fail())
+ {
+ return false;
+ }
+
+ inFile.seekg(0, std::ios::end);
+ stringOut->reserve(static_cast<std::string::size_type>(inFile.tellg()));
+ inFile.seekg(0, std::ios::beg);
+
+ stringOut->assign(std::istreambuf_iterator<char>(inFile), std::istreambuf_iterator<char>());
+ return !inFile.fail();
+}
+
+bool BeginsWith(const std::string &str, const std::string &prefix)
+{
+ return strncmp(str.c_str(), prefix.c_str(), prefix.length()) == 0;
+}
+
+bool BeginsWith(const std::string &str, const char *prefix)
+{
+ return strncmp(str.c_str(), prefix, strlen(prefix)) == 0;
+}
+
+bool BeginsWith(const char *str, const char *prefix)
+{
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+}
+
+bool BeginsWith(const std::string &str, const std::string &prefix, const size_t prefixLength)
+{
+ return strncmp(str.c_str(), prefix.c_str(), prefixLength) == 0;
+}
+
+bool EndsWith(const std::string &str, const std::string &suffix)
+{
+ return EndsWithSuffix(str.c_str(), str.length(), suffix.c_str(), suffix.length());
+}
+
+bool EndsWith(const std::string &str, const char *suffix)
+{
+ return EndsWithSuffix(str.c_str(), str.length(), suffix, strlen(suffix));
+}
+
+bool EndsWith(const char *str, const char *suffix)
+{
+ return EndsWithSuffix(str, strlen(str), suffix, strlen(suffix));
+}
+
+bool ContainsToken(const std::string &tokenStr, char delimiter, const std::string &token)
+{
+ if (token.empty())
+ {
+ return false;
+ }
+ // Compare token with all sub-strings terminated by delimiter or end of string
+ std::string::size_type start = 0u;
+ do
+ {
+ std::string::size_type end = tokenStr.find(delimiter, start);
+ if (end == std::string::npos)
+ {
+ end = tokenStr.length();
+ }
+ const std::string::size_type length = end - start;
+ if (length == token.length() && tokenStr.compare(start, length, token) == 0)
+ {
+ return true;
+ }
+ start = end + 1u;
+ } while (start < tokenStr.size());
+ return false;
+}
+
+void ToLower(std::string *str)
+{
+ for (char &ch : *str)
+ {
+ ch = static_cast<char>(::tolower(ch));
+ }
+}
+
+void ToUpper(std::string *str)
+{
+ for (char &ch : *str)
+ {
+ ch = static_cast<char>(::toupper(ch));
+ }
+}
+
+bool ReplaceSubstring(std::string *str,
+ const std::string &substring,
+ const std::string &replacement)
+{
+ size_t replacePos = str->find(substring);
+ if (replacePos == std::string::npos)
+ {
+ return false;
+ }
+ str->replace(replacePos, substring.size(), replacement);
+ return true;
+}
+
+int ReplaceAllSubstrings(std::string *str,
+ const std::string &substring,
+ const std::string &replacement)
+{
+ int count = 0;
+ while (ReplaceSubstring(str, substring, replacement))
+ {
+ count++;
+ }
+ return count;
+}
+
+std::string ToCamelCase(const std::string &str)
+{
+ std::string result;
+
+ bool lastWasUnderscore = false;
+ for (char c : str)
+ {
+ if (c == '_')
+ {
+ lastWasUnderscore = true;
+ continue;
+ }
+
+ if (lastWasUnderscore)
+ {
+ c = static_cast<char>(std::toupper(c));
+ lastWasUnderscore = false;
+ }
+ result += c;
+ }
+
+ return result;
+}
+
+std::vector<std::string> GetStringsFromEnvironmentVarOrAndroidProperty(const char *varName,
+ const char *propertyName,
+ const char *separator)
+{
+ std::string environment = GetEnvironmentVarOrUnCachedAndroidProperty(varName, propertyName);
+ return SplitString(environment, separator, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+}
+
+std::vector<std::string> GetCachedStringsFromEnvironmentVarOrAndroidProperty(
+ const char *varName,
+ const char *propertyName,
+ const char *separator)
+{
+ std::string environment = GetEnvironmentVarOrAndroidProperty(varName, propertyName);
+ return SplitString(environment, separator, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+}
+
+// glob can have * as wildcard
+bool NamesMatchWithWildcard(const char *glob, const char *name)
+{
+ // Find the first * in glob.
+ const char *firstWildcard = strchr(glob, '*');
+
+ // If there are no wildcards, match the strings precisely.
+ if (firstWildcard == nullptr)
+ {
+ return strcmp(glob, name) == 0;
+ }
+
+ // Otherwise, match up to the wildcard first.
+ size_t preWildcardLen = firstWildcard - glob;
+ if (strncmp(glob, name, preWildcardLen) != 0)
+ {
+ return false;
+ }
+
+ const char *postWildcardRef = glob + preWildcardLen + 1;
+
+ // As a small optimization, if the wildcard is the last character in glob, accept the match
+ // immediately.
+ if (postWildcardRef[0] == '\0')
+ {
+ return true;
+ }
+
+ // Try to match the wildcard with a number of characters.
+ for (size_t matchSize = 0; name[matchSize] != '\0'; ++matchSize)
+ {
+ if (NamesMatchWithWildcard(postWildcardRef, name + matchSize))
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+} // namespace angle
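The helpers above can be exercised roughly as follows; the extension and library names are placeholders chosen for illustration.

#include "common/string_utils.h"

#include <cassert>

void stringUtilsSketch()
{
    using namespace angle;

    // Split a comma-separated list, trimming whitespace and dropping empty pieces.
    std::vector<std::string> parts =
        SplitString(" red, green ,, blue ", ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
    assert(parts.size() == 3 && parts[1] == "green");

    // Look up a token in a space-separated extension string.
    assert(ContainsToken("GL_OES_depth24 GL_OES_rgb8_rgba8", ' ', "GL_OES_rgb8_rgba8"));

    // '*' matches any run of characters, including an empty one.
    assert(NamesMatchWithWildcard("libEGL*", "libEGL_angle.so"));
    assert(!NamesMatchWithWildcard("libGLES*", "libEGL.so"));
}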
diff --git a/gfx/angle/checkout/src/common/string_utils.h b/gfx/angle/checkout/src/common/string_utils.h
new file mode 100644
index 0000000000..7be7efe983
--- /dev/null
+++ b/gfx/angle/checkout/src/common/string_utils.h
@@ -0,0 +1,125 @@
+//
+// Copyright 2015 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// string_utils:
+// String helper functions.
+//
+
+#ifndef LIBANGLE_STRING_UTILS_H_
+#define LIBANGLE_STRING_UTILS_H_
+
+#include <string>
+#include <vector>
+
+#include "common/Optional.h"
+
+namespace angle
+{
+
+extern const char kWhitespaceASCII[];
+
+enum WhitespaceHandling
+{
+ KEEP_WHITESPACE,
+ TRIM_WHITESPACE,
+};
+
+enum SplitResult
+{
+ SPLIT_WANT_ALL,
+ SPLIT_WANT_NONEMPTY,
+};
+
+std::vector<std::string> SplitString(const std::string &input,
+ const std::string &delimiters,
+ WhitespaceHandling whitespace,
+ SplitResult resultType);
+
+void SplitStringAlongWhitespace(const std::string &input, std::vector<std::string> *tokensOut);
+
+std::string TrimString(const std::string &input, const std::string &trimChars);
+
+// Return the substring starting at offset and up to the first occurrence of the |delimiter|.
+std::string GetPrefix(const std::string &input, size_t offset, const char *delimiter);
+std::string GetPrefix(const std::string &input, size_t offset, char delimiter);
+
+bool HexStringToUInt(const std::string &input, unsigned int *uintOut);
+
+bool ReadFileToString(const std::string &path, std::string *stringOut);
+
+// Check if the string str begins with the given prefix.
+// The comparison is case sensitive.
+bool BeginsWith(const std::string &str, const std::string &prefix);
+
+// Check if the string str begins with the given prefix.
+// Prefix may not be NULL and needs to be NULL terminated.
+// The comparison is case sensitive.
+bool BeginsWith(const std::string &str, const char *prefix);
+
+// Check if the string str begins with the given prefix.
+// str and prefix may not be NULL and need to be NULL terminated.
+// The comparison is case sensitive.
+bool BeginsWith(const char *str, const char *prefix);
+
+// Check if the string str begins with the first prefixLength characters of the given prefix.
+// The length of the prefix string should be greater than or equal to prefixLength.
+// The comparison is case sensitive.
+bool BeginsWith(const std::string &str, const std::string &prefix, const size_t prefixLength);
+
+// Check if the string str ends with the given suffix.
+// The comparison is case sensitive.
+bool EndsWith(const std::string &str, const std::string &suffix);
+
+// Check if the string str ends with the given suffix.
+// Suffix may not be NULL and needs to be NULL terminated.
+// The comparison is case sensitive.
+bool EndsWith(const std::string &str, const char *suffix);
+
+// Check if the string str ends with the given suffix.
+// str and suffix may not be NULL and need to be NULL terminated.
+// The comparison is case sensitive.
+bool EndsWith(const char *str, const char *suffix);
+
+// Check if the given token string contains the given token.
+// The tokens are separated by the given delimiter.
+// The comparison is case sensitive.
+bool ContainsToken(const std::string &tokenStr, char delimiter, const std::string &token);
+
+// Convert to lower-case.
+void ToLower(std::string *str);
+
+// Convert to upper-case.
+void ToUpper(std::string *str);
+
+// Replaces the substring 'substring' in 'str' with 'replacement'. Returns true if successful.
+bool ReplaceSubstring(std::string *str,
+ const std::string &substring,
+ const std::string &replacement);
+
+// Replaces all substrings 'substring' in 'str' with 'replacement'. Returns count of replacements.
+int ReplaceAllSubstrings(std::string *str,
+ const std::string &substring,
+ const std::string &replacement);
+
+// Takes a snake_case string and turns it into camelCase.
+std::string ToCamelCase(const std::string &str);
+
+// Split up a string parsed from an environment variable.
+std::vector<std::string> GetStringsFromEnvironmentVarOrAndroidProperty(const char *varName,
+ const char *propertyName,
+ const char *separator);
+
+// Split up a string parsed from an environment variable or an Android property, using the cached
+// result if available.
+std::vector<std::string> GetCachedStringsFromEnvironmentVarOrAndroidProperty(
+ const char *varName,
+ const char *propertyName,
+ const char *separator);
+
+// glob can have * as wildcard
+bool NamesMatchWithWildcard(const char *glob, const char *name);
+} // namespace angle
+
+#endif // LIBANGLE_STRING_UTILS_H_
diff --git a/gfx/angle/checkout/src/common/system_utils.cpp b/gfx/angle/checkout/src/common/system_utils.cpp
new file mode 100644
index 0000000000..89b632bc17
--- /dev/null
+++ b/gfx/angle/checkout/src/common/system_utils.cpp
@@ -0,0 +1,267 @@
+//
+// Copyright 2018 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// system_utils.cpp: Implementation of common functions
+
+#include "common/system_utils.h"
+#include "common/debug.h"
+
+#include <stdlib.h>
+#include <atomic>
+
+#if defined(ANGLE_PLATFORM_ANDROID)
+# include <sys/system_properties.h>
+#endif
+
+#if defined(ANGLE_PLATFORM_APPLE)
+# include <dispatch/dispatch.h>
+# include <pthread.h>
+#endif
+
+namespace angle
+{
+std::string GetExecutableName()
+{
+#if defined(ANGLE_PLATFORM_ANDROID) && __ANDROID_API__ >= 21
+ // Support for "getprogname" function in bionic was introduced in L (API level 21)
+ const char *executableName = getprogname();
+ return (executableName) ? std::string(executableName) : "ANGLE";
+#else
+ std::string executableName = GetExecutablePath();
+ size_t lastPathSepLoc = executableName.find_last_of(GetPathSeparator());
+ return (lastPathSepLoc > 0 ? executableName.substr(lastPathSepLoc + 1, executableName.length())
+ : "ANGLE");
+#endif // ANGLE_PLATFORM_ANDROID
+}
+
+// On Android, return the value cached in the process environment; if it is not set, fall back to
+// GetEnvironmentVarOrUnCachedAndroidProperty.
+std::string GetEnvironmentVarOrAndroidProperty(const char *variableName, const char *propertyName)
+{
+#if defined(ANGLE_PLATFORM_ANDROID) && __ANDROID_API__ >= 21
+ // Can't use GetEnvironmentVar here because that won't allow us to distinguish between the
+ // environment being set to an empty string vs. not set at all.
+ const char *variableValue = getenv(variableName);
+ if (variableValue != nullptr)
+ {
+ std::string value(variableValue);
+ return value;
+ }
+#endif
+ return GetEnvironmentVarOrUnCachedAndroidProperty(variableName, propertyName);
+}
+
+// On Android, query the system property directly (uncached). On other platforms, return the value
+// of the environment variable.
+std::string GetEnvironmentVarOrUnCachedAndroidProperty(const char *variableName,
+ const char *propertyName)
+{
+#if defined(ANGLE_PLATFORM_ANDROID) && __ANDROID_API__ >= 26
+ std::string propertyValue;
+
+ const prop_info *propertyInfo = __system_property_find(propertyName);
+ if (propertyInfo != nullptr)
+ {
+ __system_property_read_callback(
+ propertyInfo,
+ [](void *cookie, const char *, const char *value, unsigned) {
+ auto propertyValue = reinterpret_cast<std::string *>(cookie);
+ *propertyValue = value;
+ },
+ &propertyValue);
+ }
+
+ return propertyValue;
+#else
+ // Return the environment variable's value.
+ return GetEnvironmentVar(variableName);
+#endif // ANGLE_PLATFORM_ANDROID
+}
+
+// Look up a property and add it to the application's environment.
+// Adding to the env is a performance optimization, as getting properties is expensive.
+// This should only be used in non-Release paths, i.e. when using FrameCapture or DebugUtils.
+// It can cause race conditions in stress testing. See http://anglebug.com/6822
+std::string GetAndSetEnvironmentVarOrUnCachedAndroidProperty(const char *variableName,
+ const char *propertyName)
+{
+ std::string value = GetEnvironmentVarOrUnCachedAndroidProperty(variableName, propertyName);
+
+#if defined(ANGLE_PLATFORM_ANDROID)
+ if (!value.empty())
+ {
+ // Set the environment variable with the value to improve future lookups (avoids repeated expensive property queries).
+ SetEnvironmentVar(variableName, value.c_str());
+ }
+#endif
+
+ return value;
+}
+
+bool GetBoolEnvironmentVar(const char *variableName)
+{
+ std::string envVarString = GetEnvironmentVar(variableName);
+ return (!envVarString.empty() && envVarString == "1");
+}
+
+bool PrependPathToEnvironmentVar(const char *variableName, const char *path)
+{
+ std::string oldValue = GetEnvironmentVar(variableName);
+ const char *newValue = nullptr;
+ std::string buf;
+ if (oldValue.empty())
+ {
+ newValue = path;
+ }
+ else
+ {
+ buf = path;
+ buf += GetPathSeparatorForEnvironmentVar();
+ buf += oldValue;
+ newValue = buf.c_str();
+ }
+ return SetEnvironmentVar(variableName, newValue);
+}
+
+bool IsFullPath(std::string dirName)
+{
+ if (dirName.find(GetRootDirectory()) == 0)
+ {
+ return true;
+ }
+ return false;
+}
+
+std::string ConcatenatePath(std::string first, std::string second)
+{
+ if (first.empty())
+ {
+ return second;
+ }
+ if (second.empty())
+ {
+ return first;
+ }
+ if (IsFullPath(second))
+ {
+ return second;
+ }
+ bool firstRedundantPathSeparator = first.find_last_of(GetPathSeparator()) == first.length() - 1;
+ bool secondRedundantPathSeparator = second.find(GetPathSeparator()) == 0;
+ if (firstRedundantPathSeparator && secondRedundantPathSeparator)
+ {
+ return first + second.substr(1);
+ }
+ else if (firstRedundantPathSeparator || secondRedundantPathSeparator)
+ {
+ return first + second;
+ }
+ return first + GetPathSeparator() + second;
+}
+
+Optional<std::string> CreateTemporaryFile()
+{
+ const Optional<std::string> tempDir = GetTempDirectory();
+ if (!tempDir.valid())
+ return Optional<std::string>::Invalid();
+
+ return CreateTemporaryFileInDirectory(tempDir.value());
+}
+
+PageFaultHandler::PageFaultHandler(PageFaultCallback callback) : mCallback(callback) {}
+PageFaultHandler::~PageFaultHandler() {}
+
+Library *OpenSharedLibrary(const char *libraryName, SearchType searchType)
+{
+ void *libraryHandle = OpenSystemLibraryAndGetError(libraryName, searchType, nullptr);
+ return new Library(libraryHandle);
+}
+
+Library *OpenSharedLibraryWithExtension(const char *libraryName, SearchType searchType)
+{
+ void *libraryHandle =
+ OpenSystemLibraryWithExtensionAndGetError(libraryName, searchType, nullptr);
+ return new Library(libraryHandle);
+}
+
+Library *OpenSharedLibraryAndGetError(const char *libraryName,
+ SearchType searchType,
+ std::string *errorOut)
+{
+ void *libraryHandle = OpenSystemLibraryAndGetError(libraryName, searchType, errorOut);
+ return new Library(libraryHandle);
+}
+
+Library *OpenSharedLibraryWithExtensionAndGetError(const char *libraryName,
+ SearchType searchType,
+ std::string *errorOut)
+{
+ void *libraryHandle =
+ OpenSystemLibraryWithExtensionAndGetError(libraryName, searchType, errorOut);
+ return new Library(libraryHandle);
+}
+
+void *OpenSystemLibrary(const char *libraryName, SearchType searchType)
+{
+ return OpenSystemLibraryAndGetError(libraryName, searchType, nullptr);
+}
+
+void *OpenSystemLibraryWithExtension(const char *libraryName, SearchType searchType)
+{
+ return OpenSystemLibraryWithExtensionAndGetError(libraryName, searchType, nullptr);
+}
+
+void *OpenSystemLibraryAndGetError(const char *libraryName,
+ SearchType searchType,
+ std::string *errorOut)
+{
+ std::string libraryWithExtension = std::string(libraryName) + "." + GetSharedLibraryExtension();
+#if ANGLE_PLATFORM_IOS
+ // On iOS, libraryWithExtension is a directory in which the library resides.
+ // The actual library name doesn't have an extension at all.
+ // E.g. "libEGL.framework/libEGL"
+ libraryWithExtension = libraryWithExtension + "/" + libraryName;
+#endif
+ return OpenSystemLibraryWithExtensionAndGetError(libraryWithExtension.c_str(), searchType,
+ errorOut);
+}
+
+std::string StripFilenameFromPath(const std::string &path)
+{
+ size_t lastPathSepLoc = path.find_last_of("\\/");
+ return (lastPathSepLoc != std::string::npos) ? path.substr(0, lastPathSepLoc) : "";
+}
+
+static std::atomic<uint64_t> globalThreadSerial(1);
+
+#if defined(ANGLE_PLATFORM_APPLE)
+// https://anglebug.com/6479, similar to egl::GetCurrentThread() in libGLESv2/global_state.cpp
+uint64_t GetCurrentThreadUniqueId()
+{
+ static pthread_key_t tlsIndex;
+ static dispatch_once_t once;
+ dispatch_once(&once, ^{
+ ASSERT(pthread_key_create(&tlsIndex, nullptr) == 0);
+ });
+
+ void *tlsValue = pthread_getspecific(tlsIndex);
+ if (tlsValue == nullptr)
+ {
+ uint64_t threadId = globalThreadSerial++;
+ ASSERT(pthread_setspecific(tlsIndex, reinterpret_cast<void *>(threadId)) == 0);
+ return threadId;
+ }
+ return reinterpret_cast<uint64_t>(tlsValue);
+}
+#else
+uint64_t GetCurrentThreadUniqueId()
+{
+ thread_local uint64_t threadId(globalThreadSerial++);
+ return threadId;
+}
+#endif
+
+} // namespace angle
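A short sketch of the environment and path helpers implemented above; the variable names and directories are illustrative only.

#include "common/system_utils.h"

#include <cstdio>

void envPathSketch()
{
    // Only the exact value "1" reads as true; unset or any other value is false.
    if (angle::GetBoolEnvironmentVar("ANGLE_EXAMPLE_FLAG"))
    {
        printf("flag enabled\n");
    }

    // ConcatenatePath joins the pieces with a single separator, avoiding duplicates,
    // and returns the second piece unchanged if it is already a full path.
    std::string path = angle::ConcatenatePath("/tmp/", "angle/cache");
    printf("%s\n", path.c_str());  // "/tmp/angle/cache" on POSIX

    // Prepend a directory to a PATH-style variable (':' on POSIX, ';' on Windows).
    if (!angle::PrependPathToEnvironmentVar("ANGLE_EXAMPLE_PATH", "/opt/angle"))
    {
        fprintf(stderr, "failed to update ANGLE_EXAMPLE_PATH\n");
    }
}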
diff --git a/gfx/angle/checkout/src/common/system_utils.h b/gfx/angle/checkout/src/common/system_utils.h
new file mode 100644
index 0000000000..d9e435afaa
--- /dev/null
+++ b/gfx/angle/checkout/src/common/system_utils.h
@@ -0,0 +1,224 @@
+//
+// Copyright 2014 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// system_utils.h: declaration of OS-specific utility functions
+
+#ifndef COMMON_SYSTEM_UTILS_H_
+#define COMMON_SYSTEM_UTILS_H_
+
+#include "common/Optional.h"
+#include "common/angleutils.h"
+
+#include <functional>
+#include <string>
+#include <string_view>
+
+namespace angle
+{
+std::string GetExecutableName();
+std::string GetExecutablePath();
+std::string GetExecutableDirectory();
+std::string GetModuleDirectory();
+const char *GetSharedLibraryExtension();
+const char *GetExecutableExtension();
+char GetPathSeparator();
+Optional<std::string> GetCWD();
+bool SetCWD(const char *dirName);
+bool SetEnvironmentVar(const char *variableName, const char *value);
+bool UnsetEnvironmentVar(const char *variableName);
+bool GetBoolEnvironmentVar(const char *variableName);
+std::string GetEnvironmentVar(const char *variableName);
+std::string GetEnvironmentVarOrUnCachedAndroidProperty(const char *variableName,
+ const char *propertyName);
+std::string GetAndSetEnvironmentVarOrUnCachedAndroidProperty(const char *variableName,
+ const char *propertyName);
+std::string GetEnvironmentVarOrAndroidProperty(const char *variableName, const char *propertyName);
+const char *GetPathSeparatorForEnvironmentVar();
+bool PrependPathToEnvironmentVar(const char *variableName, const char *path);
+bool IsDirectory(const char *filename);
+bool IsFullPath(std::string dirName);
+std::string GetRootDirectory();
+std::string ConcatenatePath(std::string first, std::string second);
+
+Optional<std::string> GetTempDirectory();
+Optional<std::string> CreateTemporaryFileInDirectory(const std::string &directory);
+Optional<std::string> CreateTemporaryFile();
+
+// Get absolute time in seconds. Use this function to get an absolute time with an unknown origin.
+double GetCurrentSystemTime();
+// Get CPU time for current process in seconds.
+double GetCurrentProcessCpuTime();
+
+// Unique thread id (std::this_thread::get_id() gets recycled!)
+uint64_t GetCurrentThreadUniqueId();
+
+// Run an application and get the output. Gets a nullptr-terminated set of args to execute the
+// application with, and returns the stdout and stderr outputs as well as the exit code.
+//
+// Pass nullptr for stdoutOut/stderrOut if you don't need to capture. exitCodeOut is required.
+//
+// Returns false if it fails to actually execute the application.
+bool RunApp(const std::vector<const char *> &args,
+ std::string *stdoutOut,
+ std::string *stderrOut,
+ int *exitCodeOut);
+
+// Use SystemDir to bypass loading ANGLE libraries with the same name as system DLLs
+// (e.g. opengl32.dll).
+enum class SearchType
+{
+ // Try to find the library in the same directory as the current module
+ ModuleDir,
+ // Load the library from the system directories
+ SystemDir,
+ // Get a reference to an already loaded shared library.
+ AlreadyLoaded,
+};
+
+void *OpenSystemLibrary(const char *libraryName, SearchType searchType);
+void *OpenSystemLibraryWithExtension(const char *libraryName, SearchType searchType);
+void *OpenSystemLibraryAndGetError(const char *libraryName,
+ SearchType searchType,
+ std::string *errorOut);
+void *OpenSystemLibraryWithExtensionAndGetError(const char *libraryName,
+ SearchType searchType,
+ std::string *errorOut);
+
+void *GetLibrarySymbol(void *libraryHandle, const char *symbolName);
+std::string GetLibraryPath(void *libraryHandle);
+void CloseSystemLibrary(void *libraryHandle);
+
+class Library : angle::NonCopyable
+{
+ public:
+ Library() {}
+ Library(void *libraryHandle) : mLibraryHandle(libraryHandle) {}
+ ~Library() { close(); }
+
+ [[nodiscard]] bool open(const char *libraryName, SearchType searchType)
+ {
+ close();
+ mLibraryHandle = OpenSystemLibrary(libraryName, searchType);
+ return mLibraryHandle != nullptr;
+ }
+
+ [[nodiscard]] bool openWithExtension(const char *libraryName, SearchType searchType)
+ {
+ close();
+ mLibraryHandle = OpenSystemLibraryWithExtension(libraryName, searchType);
+ return mLibraryHandle != nullptr;
+ }
+
+ [[nodiscard]] bool openAndGetError(const char *libraryName,
+ SearchType searchType,
+ std::string *errorOut)
+ {
+ close();
+ mLibraryHandle = OpenSystemLibraryAndGetError(libraryName, searchType, errorOut);
+ return mLibraryHandle != nullptr;
+ }
+
+ [[nodiscard]] bool openWithExtensionAndGetError(const char *libraryName,
+ SearchType searchType,
+ std::string *errorOut)
+ {
+ close();
+ mLibraryHandle =
+ OpenSystemLibraryWithExtensionAndGetError(libraryName, searchType, errorOut);
+ return mLibraryHandle != nullptr;
+ }
+
+ void close()
+ {
+ if (mLibraryHandle)
+ {
+ CloseSystemLibrary(mLibraryHandle);
+ mLibraryHandle = nullptr;
+ }
+ }
+
+ void *getSymbol(const char *symbolName) { return GetLibrarySymbol(mLibraryHandle, symbolName); }
+
+ void *getNative() const { return mLibraryHandle; }
+
+ std::string getPath() const { return GetLibraryPath(mLibraryHandle); }
+
+ template <typename FuncT>
+ void getAs(const char *symbolName, FuncT *funcOut)
+ {
+ *funcOut = reinterpret_cast<FuncT>(getSymbol(symbolName));
+ }
+
+ private:
+ void *mLibraryHandle = nullptr;
+};
+
+Library *OpenSharedLibrary(const char *libraryName, SearchType searchType);
+Library *OpenSharedLibraryWithExtension(const char *libraryName, SearchType searchType);
+Library *OpenSharedLibraryAndGetError(const char *libraryName,
+ SearchType searchType,
+ std::string *errorOut);
+Library *OpenSharedLibraryWithExtensionAndGetError(const char *libraryName,
+ SearchType searchType,
+ std::string *errorOut);
+
+// Returns true if the process is currently being debugged.
+bool IsDebuggerAttached();
+
+// Calls system APIs to break into the debugger.
+void BreakDebugger();
+
+uint64_t GetProcessMemoryUsageKB();
+
+bool ProtectMemory(uintptr_t start, size_t size);
+bool UnprotectMemory(uintptr_t start, size_t size);
+
+size_t GetPageSize();
+
+// Return type of the PageFaultCallback
+enum class PageFaultHandlerRangeType
+{
+ // The memory address was known by the page fault handler
+ InRange,
+ // The memory address was not in the page fault handler's range
+ // and the signal will be forwarded to the default page handler.
+ OutOfRange,
+};
+
+using PageFaultCallback = std::function<PageFaultHandlerRangeType(uintptr_t)>;
+
+class PageFaultHandler : angle::NonCopyable
+{
+ public:
+ PageFaultHandler(PageFaultCallback callback);
+ virtual ~PageFaultHandler();
+
+ // Registers OS level page fault handler for memory protection signals
+ // and enables reception on PageFaultCallback
+ virtual bool enable() = 0;
+
+ // Unregisters OS level page fault handler and deactivates PageFaultCallback
+ virtual bool disable() = 0;
+
+ protected:
+ PageFaultCallback mCallback;
+};
+
+// Creates single instance page fault handler
+PageFaultHandler *CreatePageFaultHandler(PageFaultCallback callback);
+
+#ifdef ANGLE_PLATFORM_WINDOWS
+// Convert a UTF-16 wstring to a UTF-8 string.
+std::string Narrow(const std::wstring_view &utf16);
+
+// Convert a UTF-8 string to a UTF-16 wstring.
+std::wstring Widen(const std::string_view &utf8);
+#endif
+
+std::string StripFilenameFromPath(const std::string &path);
+} // namespace angle
+
+#endif // COMMON_SYSTEM_UTILS_H_
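A hedged sketch of the Library RAII wrapper declared above; the library name and the foo symbol are placeholders, not something ANGLE actually loads.

#include "common/system_utils.h"

#include <cstdio>

using PFNFOOPROC = int (*)(int);

void libraryLoadSketch()
{
    angle::Library library;
    std::string error;

    // The "WithExtension" variants take the full file name rather than appending
    // the platform-specific shared library extension.
    if (!library.openWithExtensionAndGetError("libexample.so", angle::SearchType::SystemDir,
                                              &error))
    {
        fprintf(stderr, "load failed: %s\n", error.c_str());
        return;
    }

    // getAs() wraps getSymbol() and casts the result to the requested function type.
    PFNFOOPROC foo = nullptr;
    library.getAs("foo", &foo);
    if (foo != nullptr)
    {
        printf("foo(1) = %d\n", foo(1));
    }
    // The underlying handle is closed when 'library' goes out of scope.
}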
diff --git a/gfx/angle/checkout/src/common/system_utils_apple.cpp b/gfx/angle/checkout/src/common/system_utils_apple.cpp
new file mode 100644
index 0000000000..532714248c
--- /dev/null
+++ b/gfx/angle/checkout/src/common/system_utils_apple.cpp
@@ -0,0 +1,59 @@
+//
+// Copyright 2020 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// system_utils_apple.cpp: Implementation of OS-specific functions for Apple platforms
+
+#include "system_utils.h"
+
+#include <unistd.h>
+
+#include <CoreServices/CoreServices.h>
+#include <mach-o/dyld.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <cstdlib>
+#include <vector>
+
+#include <array>
+
+namespace angle
+{
+std::string GetExecutablePath()
+{
+ std::string result;
+
+ uint32_t size = 0;
+ _NSGetExecutablePath(nullptr, &size);
+
+ std::vector<char> buffer;
+ buffer.resize(size + 1);
+
+ _NSGetExecutablePath(buffer.data(), &size);
+ buffer[size] = '\0';
+
+ if (!strrchr(buffer.data(), '/'))
+ {
+ return "";
+ }
+ return buffer.data();
+}
+
+std::string GetExecutableDirectory()
+{
+ std::string executablePath = GetExecutablePath();
+ size_t lastPathSepLoc = executablePath.find_last_of("/");
+ return (lastPathSepLoc != std::string::npos) ? executablePath.substr(0, lastPathSepLoc) : "";
+}
+
+double GetCurrentSystemTime()
+{
+ mach_timebase_info_data_t timebaseInfo;
+ mach_timebase_info(&timebaseInfo);
+
+ double secondCoeff = timebaseInfo.numer * 1e-9 / timebaseInfo.denom;
+ return secondCoeff * mach_absolute_time();
+}
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/system_utils_linux.cpp b/gfx/angle/checkout/src/common/system_utils_linux.cpp
new file mode 100644
index 0000000000..da0ed86028
--- /dev/null
+++ b/gfx/angle/checkout/src/common/system_utils_linux.cpp
@@ -0,0 +1,55 @@
+//
+// Copyright 2015 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// system_utils_linux.cpp: Implementation of OS-specific functions for Linux
+
+#include "system_utils.h"
+
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <array>
+
+namespace angle
+{
+std::string GetExecutablePath()
+{
+ // We cannot use lstat to get the size of /proc/self/exe as it always returns 0,
+ // so we just use a big buffer and hope the path fits in it.
+ char path[4096];
+
+ ssize_t result = readlink("/proc/self/exe", path, sizeof(path) - 1);
+ if (result < 0 || static_cast<size_t>(result) >= sizeof(path) - 1)
+ {
+ return "";
+ }
+
+ path[result] = '\0';
+ return path;
+}
+
+std::string GetExecutableDirectory()
+{
+ std::string executablePath = GetExecutablePath();
+ size_t lastPathSepLoc = executablePath.find_last_of("/");
+ return (lastPathSepLoc != std::string::npos) ? executablePath.substr(0, lastPathSepLoc) : "";
+}
+
+const char *GetSharedLibraryExtension()
+{
+ return "so";
+}
+
+double GetCurrentSystemTime()
+{
+ struct timespec currentTime;
+ clock_gettime(CLOCK_MONOTONIC, &currentTime);
+ return currentTime.tv_sec + currentTime.tv_nsec * 1e-9;
+}
+
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/system_utils_mac.cpp b/gfx/angle/checkout/src/common/system_utils_mac.cpp
new file mode 100644
index 0000000000..2c48a7eb04
--- /dev/null
+++ b/gfx/angle/checkout/src/common/system_utils_mac.cpp
@@ -0,0 +1,28 @@
+//
+// Copyright 2015 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// system_utils_mac.cpp: Implementation of OS-specific functions for macOS
+
+#include "system_utils.h"
+
+#include <unistd.h>
+
+#include <CoreServices/CoreServices.h>
+#include <mach-o/dyld.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <cstdlib>
+#include <vector>
+
+#include <array>
+
+namespace angle
+{
+const char *GetSharedLibraryExtension()
+{
+ return "dylib";
+}
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/system_utils_posix.cpp b/gfx/angle/checkout/src/common/system_utils_posix.cpp
new file mode 100644
index 0000000000..ab0faee0bc
--- /dev/null
+++ b/gfx/angle/checkout/src/common/system_utils_posix.cpp
@@ -0,0 +1,470 @@
+//
+// Copyright 2018 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// system_utils_posix.cpp: Implementation of POSIX OS-specific functions.
+
+#include "common/debug.h"
+#include "system_utils.h"
+
+#include <array>
+#include <iostream>
+
+#include <dlfcn.h>
+#include <grp.h>
+#include <inttypes.h>
+#include <pwd.h>
+#include <signal.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "common/string_utils.h"
+
+#ifdef ANGLE_PLATFORM_FUCHSIA
+# include <zircon/process.h>
+# include <zircon/syscalls.h>
+#else
+# include <sys/resource.h>
+#endif
+
+namespace angle
+{
+
+namespace
+{
+std::string GetModulePath(void *moduleOrSymbol)
+{
+ Dl_info dlInfo;
+ if (dladdr(moduleOrSymbol, &dlInfo) == 0)
+ {
+ return "";
+ }
+
+ return dlInfo.dli_fname;
+}
+
+void *OpenPosixLibrary(const std::string &fullPath, int extraFlags, std::string *errorOut)
+{
+ void *module = dlopen(fullPath.c_str(), RTLD_NOW | extraFlags);
+ if (module)
+ {
+ if (errorOut)
+ {
+ *errorOut = fullPath;
+ }
+ }
+ else if (errorOut)
+ {
+ *errorOut = "dlopen(";
+ *errorOut += fullPath;
+ *errorOut += ") failed with error: ";
+ *errorOut += dlerror();
+ struct stat sfile;
+ if (-1 == stat(fullPath.c_str(), &sfile))
+ {
+ *errorOut += ", stat() call failed.";
+ }
+ else
+ {
+ *errorOut += ", stat() info: ";
+ struct passwd *pwuser = getpwuid(sfile.st_uid);
+ if (pwuser)
+ {
+ *errorOut += "owner: ";
+ *errorOut += pwuser->pw_name;
+ *errorOut += ", ";
+ }
+ struct group *grpnam = getgrgid(sfile.st_gid);
+ if (grpnam)
+ {
+ *errorOut += "group: ";
+ *errorOut += grpnam->gr_name;
+ *errorOut += ", ";
+ }
+ *errorOut += "perms: ";
+ *errorOut += std::to_string(sfile.st_mode);
+ *errorOut += ", links: ";
+ *errorOut += std::to_string(sfile.st_nlink);
+ *errorOut += ", size: ";
+ *errorOut += std::to_string(sfile.st_size);
+ }
+ }
+ return module;
+}
+} // namespace
+
+Optional<std::string> GetCWD()
+{
+ std::array<char, 4096> pathBuf;
+ char *result = getcwd(pathBuf.data(), pathBuf.size());
+ if (result == nullptr)
+ {
+ return Optional<std::string>::Invalid();
+ }
+ return std::string(pathBuf.data());
+}
+
+bool SetCWD(const char *dirName)
+{
+ return (chdir(dirName) == 0);
+}
+
+bool UnsetEnvironmentVar(const char *variableName)
+{
+ return (unsetenv(variableName) == 0);
+}
+
+bool SetEnvironmentVar(const char *variableName, const char *value)
+{
+ return (setenv(variableName, value, 1) == 0);
+}
+
+std::string GetEnvironmentVar(const char *variableName)
+{
+ const char *value = getenv(variableName);
+ return (value == nullptr ? std::string() : std::string(value));
+}
+
+const char *GetPathSeparatorForEnvironmentVar()
+{
+ return ":";
+}
+
+std::string GetModuleDirectoryAndGetError(std::string *errorOut)
+{
+ std::string directory;
+ static int placeholderSymbol = 0;
+ std::string moduleName = GetModulePath(&placeholderSymbol);
+ if (!moduleName.empty())
+ {
+ directory = moduleName.substr(0, moduleName.find_last_of('/') + 1);
+ }
+
+ // Ensure we return the full path to the module, not the relative path
+ if (!IsFullPath(directory))
+ {
+ if (errorOut)
+ {
+ *errorOut += "Directory: '";
+ *errorOut += directory;
+ *errorOut += "' is not full path";
+ }
+ Optional<std::string> cwd = GetCWD();
+ if (cwd.valid())
+ {
+ directory = ConcatenatePath(cwd.value(), directory);
+ if (errorOut)
+ {
+ *errorOut += ", so it has been modified to: '";
+ *errorOut += directory;
+ *errorOut += "'. ";
+ }
+ }
+ else if (errorOut)
+ {
+ *errorOut += " and getcwd was invalid. ";
+ }
+ }
+ return directory;
+}
+
+std::string GetModuleDirectory()
+{
+ return GetModuleDirectoryAndGetError(nullptr);
+}
+
+void *OpenSystemLibraryWithExtensionAndGetError(const char *libraryName,
+ SearchType searchType,
+ std::string *errorOut)
+{
+ std::string directory;
+ if (searchType == SearchType::ModuleDir)
+ {
+#if ANGLE_PLATFORM_IOS
+ // On iOS, shared libraries must be loaded from within the app bundle.
+ directory = GetExecutableDirectory() + "/Frameworks/";
+#elif ANGLE_PLATFORM_FUCHSIA
+ // On Fuchsia the dynamic loader always looks up libraries in /pkg/lib
+ // and disallows loading of libraries via absolute paths.
+ directory = "";
+#else
+ directory = GetModuleDirectoryAndGetError(errorOut);
+#endif
+ }
+
+ int extraFlags = 0;
+ if (searchType == SearchType::AlreadyLoaded)
+ {
+ extraFlags = RTLD_NOLOAD;
+ }
+
+ std::string fullPath = directory + libraryName;
+ return OpenPosixLibrary(fullPath, extraFlags, errorOut);
+}
+
+void *GetLibrarySymbol(void *libraryHandle, const char *symbolName)
+{
+ if (!libraryHandle)
+ {
+ return nullptr;
+ }
+
+ return dlsym(libraryHandle, symbolName);
+}
+
+std::string GetLibraryPath(void *libraryHandle)
+{
+ if (!libraryHandle)
+ {
+ return "";
+ }
+
+ return GetModulePath(libraryHandle);
+}
+
+void CloseSystemLibrary(void *libraryHandle)
+{
+ if (libraryHandle)
+ {
+ dlclose(libraryHandle);
+ }
+}
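+
+// A minimal usage sketch of the library helpers above (illustrative only; the
+// library name and symbol name are hypothetical, not part of ANGLE):
+//
+//   std::string error;
+//   void *lib =
+//       OpenSystemLibraryWithExtensionAndGetError("libexample.so", SearchType::SystemDir, &error);
+//   if (lib != nullptr)
+//   {
+//       using EntryFn = void (*)();
+//       auto entry = reinterpret_cast<EntryFn>(GetLibrarySymbol(lib, "example_entry"));
+//       // ... call entry() ...
+//       CloseSystemLibrary(lib);
+//   }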
+
+bool IsDirectory(const char *filename)
+{
+ struct stat st;
+ int result = stat(filename, &st);
+ return result == 0 && ((st.st_mode & S_IFDIR) == S_IFDIR);
+}
+
+bool IsDebuggerAttached()
+{
+ // This could have a fuller implementation.
+ // See https://cs.chromium.org/chromium/src/base/debug/debugger_posix.cc
+ return false;
+}
+
+void BreakDebugger()
+{
+ // This could have a fuller implementation.
+ // See https://cs.chromium.org/chromium/src/base/debug/debugger_posix.cc
+ abort();
+}
+
+const char *GetExecutableExtension()
+{
+ return "";
+}
+
+char GetPathSeparator()
+{
+ return '/';
+}
+
+std::string GetRootDirectory()
+{
+ return "/";
+}
+
+Optional<std::string> GetTempDirectory()
+{
+ const char *tmp = getenv("TMPDIR");
+ if (tmp != nullptr)
+ {
+ return std::string(tmp);
+ }
+
+#if defined(ANGLE_PLATFORM_ANDROID)
+ // Not used right now in the ANGLE test runner.
+ // return PathService::Get(DIR_CACHE, path);
+ return Optional<std::string>::Invalid();
+#else
+ return std::string("/tmp");
+#endif
+}
+
+Optional<std::string> CreateTemporaryFileInDirectory(const std::string &directory)
+{
+ std::string tempFileTemplate = directory + "/.angle.XXXXXX";
+
+    char tempFile[1000];
+    if (tempFileTemplate.size() >= sizeof(tempFile))
+    {
+        return Optional<std::string>::Invalid();
+    }
+    strcpy(tempFile, tempFileTemplate.c_str());
+
+    int fd = mkstemp(tempFile);
+    if (fd == -1)
+    {
+        return Optional<std::string>::Invalid();
+    }
+    close(fd);
+
+    return std::string(tempFile);
+}
+
+double GetCurrentProcessCpuTime()
+{
+#ifdef ANGLE_PLATFORM_FUCHSIA
+ static zx_handle_t me = zx_process_self();
+ zx_info_task_runtime_t task_runtime;
+ zx_object_get_info(me, ZX_INFO_TASK_RUNTIME, &task_runtime, sizeof(task_runtime), nullptr,
+ nullptr);
+ return static_cast<double>(task_runtime.cpu_time) * 1e-9;
+#else
+ // We could also have used /proc/stat, but that requires us to read the
+ // filesystem and convert from jiffies. /proc/stat also relies on jiffies
+ // (lower resolution) while getrusage can potentially use a sched_clock()
+ // underneath that has higher resolution.
+ struct rusage usage;
+ getrusage(RUSAGE_SELF, &usage);
+ double userTime = usage.ru_utime.tv_sec + usage.ru_utime.tv_usec * 1e-6;
+ double systemTime = usage.ru_stime.tv_sec + usage.ru_stime.tv_usec * 1e-6;
+ return userTime + systemTime;
+#endif
+}
+
+namespace
+{
+bool SetMemoryProtection(uintptr_t start, size_t size, int protections)
+{
+ int ret = mprotect(reinterpret_cast<void *>(start), size, protections);
+ if (ret < 0)
+ {
+ perror("mprotect failed");
+ }
+ return ret == 0;
+}
+
+class PosixPageFaultHandler : public PageFaultHandler
+{
+ public:
+ PosixPageFaultHandler(PageFaultCallback callback) : PageFaultHandler(callback) {}
+ ~PosixPageFaultHandler() override {}
+
+ bool enable() override;
+ bool disable() override;
+ void handle(int sig, siginfo_t *info, void *unused);
+
+ private:
+ struct sigaction mDefaultBusAction = {};
+ struct sigaction mDefaultSegvAction = {};
+};
+
+PosixPageFaultHandler *gPosixPageFaultHandler = nullptr;
+void SegfaultHandlerFunction(int sig, siginfo_t *info, void *unused)
+{
+ gPosixPageFaultHandler->handle(sig, info, unused);
+}
+
+void PosixPageFaultHandler::handle(int sig, siginfo_t *info, void *unused)
+{
+ bool found = false;
+ if ((sig == SIGSEGV || sig == SIGBUS) &&
+ (info->si_code == SEGV_ACCERR || info->si_code == SEGV_MAPERR))
+ {
+ found = mCallback(reinterpret_cast<uintptr_t>(info->si_addr)) ==
+ PageFaultHandlerRangeType::InRange;
+ }
+
+ // Fall back to default signal handler
+ if (!found)
+ {
+ if (sig == SIGSEGV)
+ {
+ mDefaultSegvAction.sa_sigaction(sig, info, unused);
+ }
+ else if (sig == SIGBUS)
+ {
+ mDefaultBusAction.sa_sigaction(sig, info, unused);
+ }
+ else
+ {
+ UNREACHABLE();
+ }
+ }
+}
+
+bool PosixPageFaultHandler::disable()
+{
+ return sigaction(SIGSEGV, &mDefaultSegvAction, nullptr) == 0 &&
+ sigaction(SIGBUS, &mDefaultBusAction, nullptr) == 0;
+}
+
+bool PosixPageFaultHandler::enable()
+{
+ struct sigaction sigAction = {};
+ sigAction.sa_flags = SA_SIGINFO;
+ sigAction.sa_sigaction = &SegfaultHandlerFunction;
+ sigemptyset(&sigAction.sa_mask);
+
+ // Some POSIX implementations use SIGBUS for mprotect faults
+ return sigaction(SIGSEGV, &sigAction, &mDefaultSegvAction) == 0 &&
+ sigaction(SIGBUS, &sigAction, &mDefaultBusAction) == 0;
+}
+} // namespace
+
+// Set write protection
+bool ProtectMemory(uintptr_t start, size_t size)
+{
+ return SetMemoryProtection(start, size, PROT_READ);
+}
+
+// Allow reading and writing
+bool UnprotectMemory(uintptr_t start, size_t size)
+{
+ return SetMemoryProtection(start, size, PROT_READ | PROT_WRITE);
+}
+
+size_t GetPageSize()
+{
+ long pageSize = sysconf(_SC_PAGE_SIZE);
+ if (pageSize < 0)
+ {
+ perror("Could not get sysconf page size");
+ return 0;
+ }
+ return static_cast<size_t>(pageSize);
+}
+
+PageFaultHandler *CreatePageFaultHandler(PageFaultCallback callback)
+{
+ gPosixPageFaultHandler = new PosixPageFaultHandler(callback);
+ return gPosixPageFaultHandler;
+}
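+
+// A minimal usage sketch combining the handler and protection helpers above
+// (illustrative only; it assumes PageFaultCallback accepts a lambda, and the
+// OutOfRange enumerator spelling plus the buffer variables are hypothetical):
+//
+//   PageFaultHandler *handler = CreatePageFaultHandler([](uintptr_t address) {
+//       return IsAddressTracked(address) ? PageFaultHandlerRangeType::InRange
+//                                        : PageFaultHandlerRangeType::OutOfRange;
+//   });
+//   handler->enable();
+//   ProtectMemory(bufferStart, bufferSize);   // subsequent writes invoke the callback
+//   // ... detect writes ...
+//   UnprotectMemory(bufferStart, bufferSize);
+//   handler->disable();
+//   delete handler;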
+
+uint64_t GetProcessMemoryUsageKB()
+{
+ FILE *file = fopen("/proc/self/status", "r");
+
+ if (!file)
+ {
+ return 0;
+ }
+
+ const char *kSearchString = "VmRSS:";
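+    // Lines in /proc/self/status look like "VmRSS:      123456 kB"; field 1 is the value in kB.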
+ constexpr size_t kMaxLineSize = 100;
+ std::array<char, kMaxLineSize> line = {};
+
+ uint64_t kb = 0;
+
+ while (fgets(line.data(), line.size(), file) != nullptr)
+ {
+ if (strncmp(line.data(), kSearchString, strlen(kSearchString)) == 0)
+ {
+ std::vector<std::string> strings;
+ SplitStringAlongWhitespace(line.data(), &strings);
+
+ sscanf(strings[1].c_str(), "%" SCNu64, &kb);
+ break;
+ }
+ }
+ fclose(file);
+
+ return kb;
+}
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/system_utils_win.cpp b/gfx/angle/checkout/src/common/system_utils_win.cpp
new file mode 100644
index 0000000000..8770235cd7
--- /dev/null
+++ b/gfx/angle/checkout/src/common/system_utils_win.cpp
@@ -0,0 +1,264 @@
+//
+// Copyright 2014 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// system_utils_win.cpp: Implementation of OS-specific functions for Windows.
+
+#include "system_utils.h"
+
+#include <stdarg.h>
+#include <windows.h>
+#include <array>
+#include <vector>
+
+namespace angle
+{
+
+namespace
+{
+
+std::string GetPath(HMODULE module)
+{
+ std::array<wchar_t, MAX_PATH> executableFileBuf;
+ DWORD executablePathLen = GetModuleFileNameW(module, executableFileBuf.data(),
+ static_cast<DWORD>(executableFileBuf.size()));
+ return Narrow(executablePathLen > 0 ? executableFileBuf.data() : L"");
+}
+
+std::string GetDirectory(HMODULE module)
+{
+ std::string executablePath = GetPath(module);
+ return StripFilenameFromPath(executablePath);
+}
+
+} // anonymous namespace
+
+std::string GetExecutablePath()
+{
+ return GetPath(nullptr);
+}
+
+std::string GetExecutableDirectory()
+{
+ return GetDirectory(nullptr);
+}
+
+const char *GetSharedLibraryExtension()
+{
+ return "dll";
+}
+
+Optional<std::string> GetCWD()
+{
+ std::array<wchar_t, MAX_PATH> pathBuf;
+ DWORD result = GetCurrentDirectoryW(static_cast<DWORD>(pathBuf.size()), pathBuf.data());
+ if (result == 0)
+ {
+ return Optional<std::string>::Invalid();
+ }
+ return Narrow(pathBuf.data());
+}
+
+bool SetCWD(const char *dirName)
+{
+ return (SetCurrentDirectoryW(Widen(dirName).c_str()) == TRUE);
+}
+
+const char *GetPathSeparatorForEnvironmentVar()
+{
+ return ";";
+}
+
+double GetCurrentSystemTime()
+{
+ LARGE_INTEGER frequency = {};
+ QueryPerformanceFrequency(&frequency);
+
+ LARGE_INTEGER curTime;
+ QueryPerformanceCounter(&curTime);
+
+ return static_cast<double>(curTime.QuadPart) / frequency.QuadPart;
+}
+
+double GetCurrentProcessCpuTime()
+{
+ FILETIME creationTime = {};
+ FILETIME exitTime = {};
+ FILETIME kernelTime = {};
+ FILETIME userTime = {};
+
+ // Note this will not give accurate results if the current thread is
+ // scheduled for less than the tick rate, which is often 15 ms. In that
+ // case, GetProcessTimes will not return different values, making it
+ // possible to end up with 0 ms for a process that takes 93% of a core
+ // (14/15 ms)! An alternative is QueryProcessCycleTime but there is no
+ // simple way to convert cycles back to seconds, and on top of that, it's
+ // not supported pre-Windows Vista.
+
+ // Returns 100-ns intervals, so we want to divide by 1e7 to get seconds
+ GetProcessTimes(GetCurrentProcess(), &creationTime, &exitTime, &kernelTime, &userTime);
+
+ ULARGE_INTEGER kernelInt64;
+ kernelInt64.LowPart = kernelTime.dwLowDateTime;
+ kernelInt64.HighPart = kernelTime.dwHighDateTime;
+ double systemTimeSeconds = static_cast<double>(kernelInt64.QuadPart) * 1e-7;
+
+ ULARGE_INTEGER userInt64;
+ userInt64.LowPart = userTime.dwLowDateTime;
+ userInt64.HighPart = userTime.dwHighDateTime;
+ double userTimeSeconds = static_cast<double>(userInt64.QuadPart) * 1e-7;
+
+ return systemTimeSeconds + userTimeSeconds;
+}
+
+bool IsDirectory(const char *filename)
+{
+ WIN32_FILE_ATTRIBUTE_DATA fileInformation;
+
+ BOOL result =
+ GetFileAttributesExW(Widen(filename).c_str(), GetFileExInfoStandard, &fileInformation);
+ if (result)
+ {
+ DWORD attribs = fileInformation.dwFileAttributes;
+ return (attribs != INVALID_FILE_ATTRIBUTES) && ((attribs & FILE_ATTRIBUTE_DIRECTORY) > 0);
+ }
+
+ return false;
+}
+
+bool IsDebuggerAttached()
+{
+ return !!::IsDebuggerPresent();
+}
+
+void BreakDebugger()
+{
+ __debugbreak();
+}
+
+const char *GetExecutableExtension()
+{
+ return ".exe";
+}
+
+char GetPathSeparator()
+{
+ return '\\';
+}
+
+std::string GetModuleDirectory()
+{
+// GetModuleHandleEx is unavailable on UWP
+#if !defined(ANGLE_IS_WINUWP)
+ static int placeholderSymbol = 0;
+ HMODULE module = nullptr;
+ if (GetModuleHandleExW(
+ GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ reinterpret_cast<LPCWSTR>(&placeholderSymbol), &module))
+ {
+ return GetDirectory(module);
+ }
+#endif
+ return GetDirectory(nullptr);
+}
+
+std::string GetRootDirectory()
+{
+ return "C:\\";
+}
+
+Optional<std::string> GetTempDirectory()
+{
+    char tempDirOut[MAX_PATH + 1];
+    // GetTempPathA returns 0 on failure; a std::string length can never be negative.
+    DWORD pathLen = GetTempPathA(MAX_PATH + 1, tempDirOut);
+    if (pathLen == 0 || pathLen > MAX_PATH)
+    {
+        return Optional<std::string>::Invalid();
+    }
+
+    std::string tempDir = std::string(tempDirOut);
+
+ if (tempDir.length() > 0 && tempDir.back() == '\\')
+ {
+ tempDir.pop_back();
+ }
+
+ return tempDir;
+}
+
+Optional<std::string> CreateTemporaryFileInDirectory(const std::string &directory)
+{
+ char fileName[MAX_PATH + 1];
+ if (GetTempFileNameA(directory.c_str(), "ANGLE", 0, fileName) == 0)
+ return Optional<std::string>::Invalid();
+
+ return std::string(fileName);
+}
+
+std::string GetLibraryPath(void *libraryHandle)
+{
+ if (!libraryHandle)
+ {
+ return "";
+ }
+
+ std::array<wchar_t, MAX_PATH> buffer;
+    if (GetModuleFileNameW(reinterpret_cast<HMODULE>(libraryHandle), buffer.data(),
+                           static_cast<DWORD>(buffer.size())) == 0)
+ {
+ return "";
+ }
+
+ return Narrow(buffer.data());
+}
+
+void *GetLibrarySymbol(void *libraryHandle, const char *symbolName)
+{
+ if (!libraryHandle)
+ {
+ fprintf(stderr, "Module was not loaded\n");
+ return nullptr;
+ }
+
+ return reinterpret_cast<void *>(
+ GetProcAddress(reinterpret_cast<HMODULE>(libraryHandle), symbolName));
+}
+
+void CloseSystemLibrary(void *libraryHandle)
+{
+ if (libraryHandle)
+ {
+ FreeLibrary(reinterpret_cast<HMODULE>(libraryHandle));
+ }
+}
+
+std::string Narrow(const std::wstring_view &utf16)
+{
+ if (utf16.empty())
+ {
+ return {};
+ }
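+    // First call computes the required UTF-8 length; the second call performs the conversion.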
+ int requiredSize = WideCharToMultiByte(CP_UTF8, 0, utf16.data(), static_cast<int>(utf16.size()),
+ nullptr, 0, nullptr, nullptr);
+ std::string utf8(requiredSize, '\0');
+ WideCharToMultiByte(CP_UTF8, 0, utf16.data(), static_cast<int>(utf16.size()), &utf8[0],
+ requiredSize, nullptr, nullptr);
+ return utf8;
+}
+
+std::wstring Widen(const std::string_view &utf8)
+{
+ if (utf8.empty())
+ {
+ return {};
+ }
+ int requiredSize =
+ MultiByteToWideChar(CP_UTF8, 0, utf8.data(), static_cast<int>(utf8.size()), nullptr, 0);
+ std::wstring utf16(requiredSize, L'\0');
+ MultiByteToWideChar(CP_UTF8, 0, utf8.data(), static_cast<int>(utf8.size()), &utf16[0],
+ requiredSize);
+ return utf16;
+}
+
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/system_utils_win32.cpp b/gfx/angle/checkout/src/common/system_utils_win32.cpp
new file mode 100644
index 0000000000..5bcfa2347e
--- /dev/null
+++ b/gfx/angle/checkout/src/common/system_utils_win32.cpp
@@ -0,0 +1,235 @@
+//
+// Copyright 2019 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// system_utils_win32.cpp: Implementation of OS-specific functions for Windows.
+
+#include "common/FastVector.h"
+#include "system_utils.h"
+
+#include <array>
+
+// Must be included in this order.
+// clang-format off
+#include <windows.h>
+#include <psapi.h>
+// clang-format on
+
+namespace angle
+{
+bool UnsetEnvironmentVar(const char *variableName)
+{
+ return (SetEnvironmentVariableW(Widen(variableName).c_str(), nullptr) == TRUE);
+}
+
+bool SetEnvironmentVar(const char *variableName, const char *value)
+{
+ return (SetEnvironmentVariableW(Widen(variableName).c_str(), Widen(value).c_str()) == TRUE);
+}
+
+std::string GetEnvironmentVar(const char *variableName)
+{
+ std::wstring variableNameUtf16 = Widen(variableName);
+ FastVector<wchar_t, MAX_PATH> value;
+
+ DWORD result;
+
+ // First get the length of the variable, including the null terminator
+ result = GetEnvironmentVariableW(variableNameUtf16.c_str(), nullptr, 0);
+
+ // Zero means the variable was not found, so return now.
+ if (result == 0)
+ {
+ return std::string();
+ }
+
+ // Now size the vector to fit the data, and read the environment variable.
+ value.resize(result, 0);
+ result = GetEnvironmentVariableW(variableNameUtf16.c_str(), value.data(), result);
+
+ return Narrow(value.data());
+}
+
+void *OpenSystemLibraryWithExtensionAndGetError(const char *libraryName,
+ SearchType searchType,
+ std::string *errorOut)
+{
+ char buffer[MAX_PATH];
+ int ret = snprintf(buffer, MAX_PATH, "%s.%s", libraryName, GetSharedLibraryExtension());
+ if (ret <= 0 || ret >= MAX_PATH)
+ {
+        fprintf(stderr, "Error loading shared library: 0x%x\n", ret);
+ return nullptr;
+ }
+
+ HMODULE libraryModule = nullptr;
+
+ switch (searchType)
+ {
+ case SearchType::ModuleDir:
+ {
+ std::string moduleRelativePath = ConcatenatePath(GetModuleDirectory(), libraryName);
+ if (errorOut)
+ {
+ *errorOut = moduleRelativePath;
+ }
+ libraryModule = LoadLibraryW(Widen(moduleRelativePath).c_str());
+ break;
+ }
+
+ case SearchType::SystemDir:
+ {
+ if (errorOut)
+ {
+ *errorOut = libraryName;
+ }
+ libraryModule =
+ LoadLibraryExW(Widen(libraryName).c_str(), nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
+ break;
+ }
+
+ case SearchType::AlreadyLoaded:
+ {
+ if (errorOut)
+ {
+ *errorOut = libraryName;
+ }
+ libraryModule = GetModuleHandleW(Widen(libraryName).c_str());
+ break;
+ }
+ }
+
+ return reinterpret_cast<void *>(libraryModule);
+}
+
+namespace
+{
+class Win32PageFaultHandler : public PageFaultHandler
+{
+ public:
+ Win32PageFaultHandler(PageFaultCallback callback) : PageFaultHandler(callback) {}
+ ~Win32PageFaultHandler() override {}
+
+ bool enable() override;
+ bool disable() override;
+
+ LONG handle(PEXCEPTION_POINTERS pExceptionInfo);
+
+ private:
+ void *mVectoredExceptionHandler = nullptr;
+};
+
+Win32PageFaultHandler *gWin32PageFaultHandler = nullptr;
+static LONG CALLBACK VectoredExceptionHandler(PEXCEPTION_POINTERS info)
+{
+ return gWin32PageFaultHandler->handle(info);
+}
+
+bool SetMemoryProtection(uintptr_t start, size_t size, DWORD protections)
+{
+ DWORD oldProtect;
+ BOOL res = VirtualProtect(reinterpret_cast<LPVOID>(start), size, protections, &oldProtect);
+ if (!res)
+ {
+ DWORD lastError = GetLastError();
+ fprintf(stderr, "VirtualProtect failed: 0x%lx\n", lastError);
+ return false;
+ }
+
+ return true;
+}
+
+LONG Win32PageFaultHandler::handle(PEXCEPTION_POINTERS info)
+{
+ bool found = false;
+
+ if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
+ info->ExceptionRecord->NumberParameters >= 2 &&
+ info->ExceptionRecord->ExceptionInformation[0] == 1)
+ {
+ found = mCallback(static_cast<uintptr_t>(info->ExceptionRecord->ExceptionInformation[1])) ==
+ PageFaultHandlerRangeType::InRange;
+ }
+
+ if (found)
+ {
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ else
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+}
+
+bool Win32PageFaultHandler::disable()
+{
+ if (mVectoredExceptionHandler)
+ {
+ ULONG res = RemoveVectoredExceptionHandler(mVectoredExceptionHandler);
+ mVectoredExceptionHandler = nullptr;
+ if (res == 0)
+ {
+ DWORD lastError = GetLastError();
+ fprintf(stderr, "RemoveVectoredExceptionHandler failed: 0x%lx\n", lastError);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Win32PageFaultHandler::enable()
+{
+ if (mVectoredExceptionHandler)
+ {
+ return true;
+ }
+
+ PVECTORED_EXCEPTION_HANDLER handler =
+ reinterpret_cast<PVECTORED_EXCEPTION_HANDLER>(&VectoredExceptionHandler);
+
+ mVectoredExceptionHandler = AddVectoredExceptionHandler(1, handler);
+
+ if (!mVectoredExceptionHandler)
+ {
+ DWORD lastError = GetLastError();
+ fprintf(stderr, "AddVectoredExceptionHandler failed: 0x%lx\n", lastError);
+ return false;
+ }
+ return true;
+}
+} // namespace
+
+// Set write protection
+bool ProtectMemory(uintptr_t start, size_t size)
+{
+ return SetMemoryProtection(start, size, PAGE_READONLY);
+}
+
+// Allow reading and writing
+bool UnprotectMemory(uintptr_t start, size_t size)
+{
+ return SetMemoryProtection(start, size, PAGE_READWRITE);
+}
+
+size_t GetPageSize()
+{
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ return static_cast<size_t>(info.dwPageSize);
+}
+
+PageFaultHandler *CreatePageFaultHandler(PageFaultCallback callback)
+{
+ gWin32PageFaultHandler = new Win32PageFaultHandler(callback);
+ return gWin32PageFaultHandler;
+}
+
+uint64_t GetProcessMemoryUsageKB()
+{
+ PROCESS_MEMORY_COUNTERS_EX pmc;
+ ::GetProcessMemoryInfo(::GetCurrentProcess(), reinterpret_cast<PROCESS_MEMORY_COUNTERS *>(&pmc),
+ sizeof(pmc));
+ return static_cast<uint64_t>(pmc.PrivateUsage) / 1024ull;
+}
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/base_export.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/base_export.h
new file mode 100644
index 0000000000..426047a992
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/base_export.h
@@ -0,0 +1,13 @@
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// base_export.h: Compatibility hacks for importing Chromium's base/SHA1.
+
+#ifndef ANGLEBASE_BASE_EXPORT_H_
+#define ANGLEBASE_BASE_EXPORT_H_
+
+#define ANGLEBASE_EXPORT
+
+#endif // ANGLEBASE_BASE_EXPORT_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/containers/mru_cache.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/containers/mru_cache.h
new file mode 100644
index 0000000000..30b564aff6
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/containers/mru_cache.h
@@ -0,0 +1,275 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains a template for a Most Recently Used cache that allows
+// constant-time access to items using a key, while also allowing easy
+// identification of the least-recently-used items for removal. Each key can
+// only be associated with one payload item at a time.
+//
+// The key object will be stored twice, so it should support efficient copying.
+//
+// NOTE: While all operations are O(1), this code is written for
+// legibility rather than optimality. If future profiling identifies this as
+// a bottleneck, there is room for smaller values of 1 in the O(1). :]
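+//
+// A minimal usage sketch (illustrative only; the key and value types are arbitrary):
+//
+//   angle::base::MRUCache<int, std::string> cache(2);  // hold at most 2 entries
+//   cache.Put(1, "one");
+//   cache.Put(2, "two");
+//   cache.Get(1);           // touches key 1, so key 2 becomes least recently used
+//   cache.Put(3, "three");  // evicts key 2
+//   auto it = cache.Peek(2);
+//   // it == cache.end(), because key 2 was evicted.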
+
+#ifndef ANGLEBASE_CONTAINERS_MRU_CACHE_H_
+#define ANGLEBASE_CONTAINERS_MRU_CACHE_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <functional>
+#include <list>
+#include <map>
+#include <unordered_map>
+#include <utility>
+
+#include "anglebase/logging.h"
+#include "anglebase/macros.h"
+
+namespace angle
+{
+
+namespace base
+{
+
+// MRUCacheBase ----------------------------------------------------------------
+
+// This template is used to standardize map type containers that can be used
+// by MRUCacheBase. This level of indirection is necessary because of the way
+// that template template params and default template params interact.
+template <class KeyType, class ValueType, class CompareType>
+struct MRUCacheStandardMap
+{
+ typedef std::map<KeyType, ValueType, CompareType> Type;
+};
+
+// Base class for the MRU cache specializations defined below.
+template <class KeyType,
+ class PayloadType,
+ class HashOrCompareType,
+ template <typename, typename, typename> class MapType = MRUCacheStandardMap>
+class MRUCacheBase
+{
+ public:
+ // The payload of the list. This maintains a copy of the key so we can
+ // efficiently delete things given an element of the list.
+ typedef std::pair<KeyType, PayloadType> value_type;
+
+ private:
+ typedef std::list<value_type> PayloadList;
+ typedef
+ typename MapType<KeyType, typename PayloadList::iterator, HashOrCompareType>::Type KeyIndex;
+
+ public:
+ typedef typename PayloadList::size_type size_type;
+
+ typedef typename PayloadList::iterator iterator;
+ typedef typename PayloadList::const_iterator const_iterator;
+ typedef typename PayloadList::reverse_iterator reverse_iterator;
+ typedef typename PayloadList::const_reverse_iterator const_reverse_iterator;
+
+ enum
+ {
+ NO_AUTO_EVICT = 0
+ };
+
+    // The max_size is the size to which the cache will prune its members when
+    // a new item is inserted. If the caller wants to manage this itself (for
+ // example, maybe it has special work to do when something is evicted), it
+ // can pass NO_AUTO_EVICT to not restrict the cache size.
+ explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {}
+
+ virtual ~MRUCacheBase() {}
+
+ size_type max_size() const { return max_size_; }
+
+ // Inserts a payload item with the given key. If an existing item has
+ // the same key, it is removed prior to insertion. An iterator indicating the
+ // inserted item will be returned (this will always be the front of the list).
+ //
+ // The payload will be forwarded.
+ template <typename Payload>
+ iterator Put(const KeyType &key, Payload &&payload)
+ {
+ // Remove any existing payload with that key.
+ typename KeyIndex::iterator index_iter = index_.find(key);
+ if (index_iter != index_.end())
+ {
+ // Erase the reference to it. The index reference will be replaced in the
+ // code below.
+ Erase(index_iter->second);
+ }
+ else if (max_size_ != NO_AUTO_EVICT)
+ {
+ // New item is being inserted which might make it larger than the maximum
+ // size: kick the oldest thing out if necessary.
+ ShrinkToSize(max_size_ - 1);
+ }
+
+ ordering_.emplace_front(key, std::forward<Payload>(payload));
+ index_.emplace(key, ordering_.begin());
+ return ordering_.begin();
+ }
+
+ // Retrieves the contents of the given key, or end() if not found. This method
+ // has the side effect of moving the requested item to the front of the
+ // recency list.
+ iterator Get(const KeyType &key)
+ {
+ typename KeyIndex::iterator index_iter = index_.find(key);
+ if (index_iter == index_.end())
+ return end();
+ typename PayloadList::iterator iter = index_iter->second;
+
+ // Move the touched item to the front of the recency ordering.
+ ordering_.splice(ordering_.begin(), ordering_, iter);
+ return ordering_.begin();
+ }
+
+    // Retrieves the item associated with a given key, or end() if not found,
+    // without affecting the recency ordering (unlike Get).
+ iterator Peek(const KeyType &key)
+ {
+ typename KeyIndex::const_iterator index_iter = index_.find(key);
+ if (index_iter == index_.end())
+ return end();
+ return index_iter->second;
+ }
+
+ const_iterator Peek(const KeyType &key) const
+ {
+ typename KeyIndex::const_iterator index_iter = index_.find(key);
+ if (index_iter == index_.end())
+ return end();
+ return index_iter->second;
+ }
+
+    // Exchanges the contents of |this| with the contents of |other|.
+ void Swap(MRUCacheBase &other)
+ {
+ ordering_.swap(other.ordering_);
+ index_.swap(other.index_);
+ std::swap(max_size_, other.max_size_);
+ }
+
+ // Erases the item referenced by the given iterator. An iterator to the item
+ // following it will be returned. The iterator must be valid.
+ iterator Erase(iterator pos)
+ {
+ index_.erase(pos->first);
+ return ordering_.erase(pos);
+ }
+
+ // MRUCache entries are often processed in reverse order, so we add this
+ // convenience function (not typically defined by STL containers).
+ reverse_iterator Erase(reverse_iterator pos)
+ {
+ // We have to actually give it the incremented iterator to delete, since
+ // the forward iterator that base() returns is actually one past the item
+ // being iterated over.
+ return reverse_iterator(Erase((++pos).base()));
+ }
+
+ // Shrinks the cache so it only holds |new_size| items. If |new_size| is
+ // bigger or equal to the current number of items, this will do nothing.
+ void ShrinkToSize(size_type new_size)
+ {
+ for (size_type i = size(); i > new_size; i--)
+ Erase(rbegin());
+ }
+
+ // Deletes everything from the cache.
+ void Clear()
+ {
+ index_.clear();
+ ordering_.clear();
+ }
+
+ // Returns the number of elements in the cache.
+ size_type size() const
+ {
+ // We don't use ordering_.size() for the return value because
+ // (as a linked list) it can be O(n).
+ DCHECK(index_.size() == ordering_.size());
+ return index_.size();
+ }
+
+ // Allows iteration over the list. Forward iteration starts with the most
+ // recent item and works backwards.
+ //
+ // Note that since these iterators are actually iterators over a list, you
+ // can keep them as you insert or delete things (as long as you don't delete
+ // the one you are pointing to) and they will still be valid.
+ iterator begin() { return ordering_.begin(); }
+ const_iterator begin() const { return ordering_.begin(); }
+ iterator end() { return ordering_.end(); }
+ const_iterator end() const { return ordering_.end(); }
+
+ reverse_iterator rbegin() { return ordering_.rbegin(); }
+ const_reverse_iterator rbegin() const { return ordering_.rbegin(); }
+ reverse_iterator rend() { return ordering_.rend(); }
+ const_reverse_iterator rend() const { return ordering_.rend(); }
+
+ bool empty() const { return ordering_.empty(); }
+
+ private:
+ PayloadList ordering_;
+ KeyIndex index_;
+
+ size_type max_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(MRUCacheBase);
+};
+
+// MRUCache --------------------------------------------------------------------
+
+// A container that does not do anything to free its data. Use this when storing
+// value types (as opposed to pointers) in the list.
+template <class KeyType, class PayloadType, class CompareType = std::less<KeyType>>
+class MRUCache : public MRUCacheBase<KeyType, PayloadType, CompareType>
+{
+ private:
+ using ParentType = MRUCacheBase<KeyType, PayloadType, CompareType>;
+
+ public:
+ // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
+ explicit MRUCache(typename ParentType::size_type max_size) : ParentType(max_size) {}
+ virtual ~MRUCache() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MRUCache);
+};
+
+// HashingMRUCache ------------------------------------------------------------
+
+template <class KeyType, class ValueType, class HashType>
+struct MRUCacheHashMap
+{
+ typedef std::unordered_map<KeyType, ValueType, HashType> Type;
+};
+
+// This class is similar to MRUCache, except that it uses std::unordered_map as
+// the map type instead of std::map. Note that your KeyType must be hashable to
+// use this cache or you need to provide a hashing class.
+template <class KeyType, class PayloadType, class HashType = std::hash<KeyType>>
+class HashingMRUCache : public MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap>
+{
+ private:
+ using ParentType = MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap>;
+
+ public:
+ // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
+ explicit HashingMRUCache(typename ParentType::size_type max_size) : ParentType(max_size) {}
+ virtual ~HashingMRUCache() override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HashingMRUCache);
+};
+
+} // namespace base
+
+} // namespace angle
+
+#endif // ANGLEBASE_CONTAINERS_MRU_CACHE_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/logging.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/logging.h
new file mode 100644
index 0000000000..73f81e87f2
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/logging.h
@@ -0,0 +1,26 @@
+//
+// Copyright 2016 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// logging.h: Compatibility hacks for importing Chromium's base/numerics.
+
+#ifndef ANGLEBASE_LOGGING_H_
+#define ANGLEBASE_LOGGING_H_
+
+#include "common/debug.h"
+
+#ifndef DCHECK
+# define DCHECK(X) ASSERT(X)
+#endif
+
+#ifndef CHECK
+# define CHECK(X) ASSERT(X)
+#endif
+
+// Unfortunately ANGLE relies on ASSERT being an empty statement, which these libs don't respect.
+#ifndef NOTREACHED
+# define NOTREACHED() ({ UNREACHABLE(); })
+#endif
+
+#endif // ANGLEBASE_LOGGING_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/macros.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/macros.h
new file mode 100644
index 0000000000..06391784e4
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/macros.h
@@ -0,0 +1,17 @@
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// macros.h: Compatibility hacks for importing Chromium's MRUCache.
+
+#ifndef ANGLEBASE_MACROS_H_
+#define ANGLEBASE_MACROS_H_
+
+// A macro to disallow the copy constructor and operator= functions.
+// This should be used in the private: declarations for a class.
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName &) = delete; \
+ void operator=(const TypeName &) = delete
+
+#endif // ANGLEBASE_MACROS_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/no_destructor.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/no_destructor.h
new file mode 100644
index 0000000000..5090dd9817
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/no_destructor.h
@@ -0,0 +1,106 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANGLEBASE_NO_DESTRUCTOR_H_
+#define ANGLEBASE_NO_DESTRUCTOR_H_
+
+#include <new>
+#include <utility>
+
+namespace angle
+{
+
+namespace base
+{
+
+// A wrapper that makes it easy to create an object of type T with static
+// storage duration that:
+// - is only constructed on first access
+// - never invokes the destructor
+// in order to satisfy the styleguide ban on global constructors and
+// destructors.
+//
+// Runtime constant example:
+// const std::string& GetLineSeparator() {
+// // Forwards to std::string(size_t, char, const Allocator&) constructor.
+// static const base::NoDestructor<std::string> s(5, '-');
+// return *s;
+// }
+//
+// More complex initialization with a lambda:
+// const std::string& GetSessionNonce() {
+// static const base::NoDestructor<std::string> nonce([] {
+// std::string s(16);
+// crypto::RandString(s.data(), s.size());
+// return s;
+// }());
+// return *nonce;
+// }
+//
+// NoDestructor<T> stores the object inline, so it also avoids a pointer
+// indirection and a malloc. Also note that since C++11 static local variable
+// initialization is thread-safe and so is this pattern. Code should prefer to
+// use NoDestructor<T> over:
+// - A function scoped static T* or T& that is dynamically initialized.
+// - A global base::LazyInstance<T>.
+//
+// Note that since the destructor is never run, this *will* leak memory if used
+// as a stack or member variable. Furthermore, a NoDestructor<T> should never
+// have global scope as that may require a static initializer.
+template <typename T>
+class NoDestructor
+{
+ public:
+ // Not constexpr; just write static constexpr T x = ...; if the value should
+ // be a constexpr.
+ template <typename... Args>
+ explicit NoDestructor(Args &&... args)
+ {
+ new (storage_) T(std::forward<Args>(args)...);
+ }
+
+ // Allows copy and move construction of the contained type, to allow
+ // construction from an initializer list, e.g. for std::vector.
+ explicit NoDestructor(const T &x) { new (storage_) T(x); }
+ explicit NoDestructor(T &&x) { new (storage_) T(std::move(x)); }
+
+ NoDestructor(const NoDestructor &) = delete;
+ NoDestructor &operator=(const NoDestructor &) = delete;
+
+ ~NoDestructor() = default;
+
+ const T &operator*() const { return *get(); }
+ T &operator*() { return *get(); }
+
+ const T *operator->() const { return get(); }
+ T *operator->() { return get(); }
+
+ const T *get() const { return reinterpret_cast<const T *>(storage_); }
+ T *get() { return reinterpret_cast<T *>(storage_); }
+
+ private:
+ alignas(T) char storage_[sizeof(T)];
+
+#if defined(LEAK_SANITIZER)
+ // TODO(https://crbug.com/812277): This is a hack to work around the fact
+ // that LSan doesn't seem to treat NoDestructor as a root for reachability
+ // analysis. This means that code like this:
+ // static base::NoDestructor<std::vector<int>> v({1, 2, 3});
+ // is considered a leak. Using the standard leak sanitizer annotations to
+ // suppress leaks doesn't work: std::vector is implicitly constructed before
+ // calling the base::NoDestructor constructor.
+ //
+ // Unfortunately, I haven't been able to demonstrate this issue in simpler
+ // reproductions: until that's resolved, hold an explicit pointer to the
+ // placement-new'd object in leak sanitizer mode to help LSan realize that
+ // objects allocated by the contained type are still reachable.
+ T *storage_ptr_ = reinterpret_cast<T *>(storage_);
+#endif // defined(LEAK_SANITIZER)
+};
+
+} // namespace base
+
+} // namespace angle
+
+#endif // ANGLEBASE_NO_DESTRUCTOR_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math.h
new file mode 100644
index 0000000000..18bceb7468
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math.h
@@ -0,0 +1,384 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CHECKED_MATH_H_
+#define BASE_NUMERICS_CHECKED_MATH_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/checked_math_impl.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+template <typename T>
+class CheckedNumeric
+{
+ static_assert(std::is_arithmetic<T>::value, "CheckedNumeric<T>: T must be a numeric type.");
+
+ public:
+ template <typename Src>
+ friend class CheckedNumeric;
+
+ using type = T;
+
+ constexpr CheckedNumeric() = default;
+
+ // Copy constructor.
+ template <typename Src>
+ constexpr CheckedNumeric(const CheckedNumeric<Src> &rhs)
+ : state_(rhs.state_.value(), rhs.IsValid())
+ {}
+
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to CheckedNumerics to make them easier to use.
+ template <typename Src>
+ constexpr CheckedNumeric(Src value) // NOLINT(runtime/explicit)
+ : state_(value)
+ {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ }
+
+ // This is not an explicit constructor because we want a seamless conversion
+ // from StrictNumeric types.
+ template <typename Src>
+ constexpr CheckedNumeric(StrictNumeric<Src> value) // NOLINT(runtime/explicit)
+ : state_(static_cast<Src>(value))
+ {}
+
+ // IsValid() - The public API to test if a CheckedNumeric is currently valid.
+ // A range checked destination type can be supplied using the Dst template
+ // parameter.
+ template <typename Dst = T>
+ constexpr bool IsValid() const
+ {
+ return state_.is_valid() && IsValueInRangeForNumericType<Dst>(state_.value());
+ }
+
+ // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
+ // and is within the range supported by the destination type. Returns true if
+ // successful and false otherwise.
+ template <typename Dst>
+#if defined(__clang__) || defined(__GNUC__)
+ __attribute__((warn_unused_result))
+#elif defined(_MSC_VER)
+ _Check_return_
+#endif
+ constexpr bool
+ AssignIfValid(Dst *result) const
+ {
+ return BASE_NUMERICS_LIKELY(IsValid<Dst>())
+ ? ((*result = static_cast<Dst>(state_.value())), true)
+ : false;
+ }
+
+ // ValueOrDie() - The primary accessor for the underlying value. If the
+ // current state is not valid it will CHECK and crash.
+ // A range checked destination type can be supplied using the Dst template
+ // parameter, which will trigger a CHECK if the value is not in bounds for
+ // the destination.
+ // The CHECK behavior can be overridden by supplying a handler as a
+ // template parameter, for test code, etc. However, the handler cannot access
+ // the underlying value, and it is not available through other means.
+ template <typename Dst = T, class CheckHandler = CheckOnFailure>
+ constexpr StrictNumeric<Dst> ValueOrDie() const
+ {
+ return BASE_NUMERICS_LIKELY(IsValid<Dst>()) ? static_cast<Dst>(state_.value())
+ : CheckHandler::template HandleFailure<Dst>();
+ }
+
+ // ValueOrDefault(T default_value) - A convenience method that returns the
+ // current value if the state is valid, and the supplied default_value for
+ // any other state.
+ // A range checked destination type can be supplied using the Dst template
+ // parameter. WARNING: This function may fail to compile or CHECK at runtime
+ // if the supplied default_value is not within range of the destination type.
+ template <typename Dst = T, typename Src>
+ constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const
+ {
+ return BASE_NUMERICS_LIKELY(IsValid<Dst>()) ? static_cast<Dst>(state_.value())
+ : checked_cast<Dst>(default_value);
+ }
+
+ // Returns a checked numeric of the specified type, cast from the current
+ // CheckedNumeric. If the current state is invalid or the destination cannot
+ // represent the result then the returned CheckedNumeric will be invalid.
+ template <typename Dst>
+ constexpr CheckedNumeric<typename UnderlyingType<Dst>::type> Cast() const
+ {
+ return *this;
+ }
+
+ // This friend method is available solely for providing more detailed logging
+ // in the tests. Do not implement it in production code, because the
+ // underlying values may change at any time.
+ template <typename U>
+ friend U GetNumericValueForTest(const CheckedNumeric<U> &src);
+
+ // Prototypes for the supported arithmetic operator overloads.
+ template <typename Src>
+ constexpr CheckedNumeric &operator+=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator-=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator*=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator/=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator%=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator<<=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator>>=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator&=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator|=(const Src rhs);
+ template <typename Src>
+ constexpr CheckedNumeric &operator^=(const Src rhs);
+
+ constexpr CheckedNumeric operator-() const
+ {
+ // Use an optimized code path for a known run-time variable.
+ if (!MustTreatAsConstexpr(state_.value()) && std::is_signed<T>::value &&
+ std::is_floating_point<T>::value)
+ {
+ return FastRuntimeNegate();
+ }
+ // The negation of two's complement int min is int min.
+ const bool is_valid =
+ IsValid() && (!std::is_signed<T>::value || std::is_floating_point<T>::value ||
+ NegateWrapper(state_.value()) != std::numeric_limits<T>::lowest());
+ return CheckedNumeric<T>(NegateWrapper(state_.value()), is_valid);
+ }
+
+ constexpr CheckedNumeric operator~() const
+ {
+ return CheckedNumeric<decltype(InvertWrapper(T()))>(InvertWrapper(state_.value()),
+ IsValid());
+ }
+
+ constexpr CheckedNumeric Abs() const
+ {
+ return !IsValueNegative(state_.value()) ? *this : -*this;
+ }
+
+ template <typename U>
+ constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(const U rhs) const
+ {
+ return CheckMax(*this, rhs);
+ }
+
+ template <typename U>
+ constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(const U rhs) const
+ {
+ return CheckMin(*this, rhs);
+ }
+
+ // This function is available only for integral types. It returns an unsigned
+ // integer of the same width as the source type, containing the absolute value
+ // of the source, and properly handling signed min.
+ constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type> UnsignedAbs() const
+ {
+ return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
+ SafeUnsignedAbs(state_.value()), state_.is_valid());
+ }
+
+ constexpr CheckedNumeric &operator++()
+ {
+ *this += 1;
+ return *this;
+ }
+
+ constexpr CheckedNumeric operator++(int)
+ {
+ CheckedNumeric value = *this;
+ *this += 1;
+ return value;
+ }
+
+ constexpr CheckedNumeric &operator--()
+ {
+ *this -= 1;
+ return *this;
+ }
+
+ constexpr CheckedNumeric operator--(int)
+ {
+ // TODO(pkasting): Consider std::exchange() once it's constexpr in C++20.
+ const CheckedNumeric value = *this;
+ *this -= 1;
+ return value;
+ }
+
+ // These perform the actual math operations on the CheckedNumerics.
+ // Binary arithmetic operations.
+ template <template <typename, typename, typename> class M, typename L, typename R>
+ static constexpr CheckedNumeric MathOp(const L lhs, const R rhs)
+ {
+ using Math = typename MathWrapper<M, L, R>::math;
+ T result = 0;
+ const bool is_valid = Wrapper<L>::is_valid(lhs) && Wrapper<R>::is_valid(rhs) &&
+ Math::Do(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs), &result);
+ return CheckedNumeric<T>(result, is_valid);
+ }
+
+ // Assignment arithmetic operations.
+ template <template <typename, typename, typename> class M, typename R>
+ constexpr CheckedNumeric &MathOp(const R rhs)
+ {
+ using Math = typename MathWrapper<M, T, R>::math;
+ T result = 0; // Using T as the destination saves a range check.
+ const bool is_valid = state_.is_valid() && Wrapper<R>::is_valid(rhs) &&
+ Math::Do(state_.value(), Wrapper<R>::value(rhs), &result);
+ *this = CheckedNumeric<T>(result, is_valid);
+ return *this;
+ }
+
+ private:
+ CheckedNumericState<T> state_;
+
+ CheckedNumeric FastRuntimeNegate() const
+ {
+ T result;
+ const bool success = CheckedSubOp<T, T>::Do(T(0), state_.value(), &result);
+ return CheckedNumeric<T>(result, IsValid() && success);
+ }
+
+ template <typename Src>
+ constexpr CheckedNumeric(Src value, bool is_valid) : state_(value, is_valid)
+ {}
+
+ // These wrappers allow us to handle state the same way for both
+ // CheckedNumeric and POD arithmetic types.
+ template <typename Src>
+ struct Wrapper
+ {
+ static constexpr bool is_valid(Src) { return true; }
+ static constexpr Src value(Src value) { return value; }
+ };
+
+ template <typename Src>
+ struct Wrapper<CheckedNumeric<Src>>
+ {
+ static constexpr bool is_valid(const CheckedNumeric<Src> v) { return v.IsValid(); }
+ static constexpr Src value(const CheckedNumeric<Src> v) { return v.state_.value(); }
+ };
+
+ template <typename Src>
+ struct Wrapper<StrictNumeric<Src>>
+ {
+ static constexpr bool is_valid(const StrictNumeric<Src>) { return true; }
+ static constexpr Src value(const StrictNumeric<Src> v) { return static_cast<Src>(v); }
+ };
+};
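+
+// A minimal usage sketch (illustrative only): arithmetic that overflows the
+// underlying type marks the CheckedNumeric invalid instead of wrapping.
+//
+//   CheckedNumeric<uint8_t> value = 200;
+//   value += 100;                     // 300 does not fit in uint8_t, so the state becomes invalid
+//   uint8_t out = 0;
+//   if (!value.AssignIfValid(&out))
+//   {
+//       out = value.ValueOrDefault(255);  // fall back to a caller-supplied default
+//   }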
+
+// Convenience functions to avoid the ugly template disambiguator syntax.
+template <typename Dst, typename Src>
+constexpr bool IsValidForType(const CheckedNumeric<Src> value)
+{
+ return value.template IsValid<Dst>();
+}
+
+template <typename Dst, typename Src>
+constexpr StrictNumeric<Dst> ValueOrDieForType(const CheckedNumeric<Src> value)
+{
+ return value.template ValueOrDie<Dst>();
+}
+
+template <typename Dst, typename Src, typename Default>
+constexpr StrictNumeric<Dst> ValueOrDefaultForType(const CheckedNumeric<Src> value,
+ const Default default_value)
+{
+ return value.template ValueOrDefault<Dst>(default_value);
+}
+
+// Convenience wrapper to return a new CheckedNumeric from the provided arithmetic
+// or CheckedNumericType.
+template <typename T>
+constexpr CheckedNumeric<typename UnderlyingType<T>::type> MakeCheckedNum(const T value)
+{
+ return value;
+}
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M, typename L, typename R>
+constexpr CheckedNumeric<typename MathWrapper<M, L, R>::type> CheckMathOp(const L lhs, const R rhs)
+{
+ using Math = typename MathWrapper<M, L, R>::math;
+ return CheckedNumeric<typename Math::result_type>::template MathOp<M>(lhs, rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+template <template <typename, typename, typename> class M, typename L, typename R, typename... Args>
+constexpr auto CheckMathOp(const L lhs, const R rhs, const Args... args)
+{
+ return CheckMathOp<M>(CheckMathOp<M>(lhs, rhs), args...);
+}
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Add, +, +=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Sub, -, -=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mul, *, *=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Div, /, /=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mod, %, %=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Lsh, <<, <<=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Rsh, >>, >>=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, And, &, &=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Or, |, |=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Xor, ^, ^=)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Max)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Min)
+
+// These are some extra StrictNumeric operators to support simple pointer
+// arithmetic with our result types. Since wrapping on a pointer is always
+// bad, we trigger the CHECK condition here.
+template <typename L, typename R>
+L *operator+(L *lhs, const StrictNumeric<R> rhs)
+{
+ const uintptr_t result =
+ CheckAdd(reinterpret_cast<uintptr_t>(lhs), CheckMul(sizeof(L), static_cast<R>(rhs)))
+ .template ValueOrDie<uintptr_t>();
+ return reinterpret_cast<L *>(result);
+}
+
+template <typename L, typename R>
+L *operator-(L *lhs, const StrictNumeric<R> rhs)
+{
+ const uintptr_t result =
+ CheckSub(reinterpret_cast<uintptr_t>(lhs), CheckMul(sizeof(L), static_cast<R>(rhs)))
+ .template ValueOrDie<uintptr_t>();
+ return reinterpret_cast<L *>(result);
+}
+
+} // namespace internal
+
+using internal::CheckAdd;
+using internal::CheckAnd;
+using internal::CheckDiv;
+using internal::CheckedNumeric;
+using internal::CheckLsh;
+using internal::CheckMax;
+using internal::CheckMin;
+using internal::CheckMod;
+using internal::CheckMul;
+using internal::CheckOr;
+using internal::CheckRsh;
+using internal::CheckSub;
+using internal::CheckXor;
+using internal::IsValidForType;
+using internal::MakeCheckedNum;
+using internal::ValueOrDefaultForType;
+using internal::ValueOrDieForType;
+
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_CHECKED_MATH_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math_impl.h
new file mode 100644
index 0000000000..e4b6082770
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/checked_math_impl.h
@@ -0,0 +1,641 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CHECKED_MATH_IMPL_H_
+#define BASE_NUMERICS_CHECKED_MATH_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions.h"
+#include "anglebase/numerics/safe_math_shared_impl.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+template <typename T>
+constexpr bool CheckedAddImpl(T x, T y, T *result)
+{
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ // Since the value of x+y is undefined if we have a signed type, we compute
+ // it using the unsigned type of the same size.
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
+ const UnsignedDst ux = static_cast<UnsignedDst>(x);
+ const UnsignedDst uy = static_cast<UnsignedDst>(y);
+ const UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
+ // Addition is valid if the sign of (x + y) is equal to either that of x or
+ // that of y.
+ if (std::is_signed<T>::value ? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) < 0
+ : uresult < uy) // Unsigned is either valid or underflow.
+ return false;
+ *result = static_cast<T>(uresult);
+ return true;
+}
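+
+// Worked example of the sign check above (illustrative only), using int8_t:
+// 100 + 100 wraps to an unsigned result of 200, which reinterprets as -56. Both
+// (uresult ^ ux) and (uresult ^ uy) then have the sign bit set, so their AND is
+// negative when viewed as SignedDst and the overflowing addition is rejected.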
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAddOp
+{};
+
+template <typename T, typename U>
+struct CheckedAddOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ if constexpr (CheckedAddFastOp<T, U>::is_supported)
+ return CheckedAddFastOp<T, U>::Do(x, y, result);
+
+ // Double the underlying type up to a full machine word.
+ using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ using Promotion =
+ typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
+ IntegerBitsPlusSign<intptr_t>::value),
+ typename BigEnoughPromotion<T, U>::type, FastPromotion>::type;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ if (BASE_NUMERICS_UNLIKELY(!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y)))
+ {
+ return false;
+ }
+
+ Promotion presult = {};
+ bool is_valid = true;
+ if constexpr (IsIntegerArithmeticSafe<Promotion, T, U>::value)
+ {
+ presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
+ }
+ else
+ {
+ is_valid =
+ CheckedAddImpl(static_cast<Promotion>(x), static_cast<Promotion>(y), &presult);
+ }
+ if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
+ return false;
+ *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+template <typename T>
+constexpr bool CheckedSubImpl(T x, T y, T *result)
+{
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ // Since the value of x+y is undefined if we have a signed type, we compute
+ // it using the unsigned type of the same size.
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
+ const UnsignedDst ux = static_cast<UnsignedDst>(x);
+ const UnsignedDst uy = static_cast<UnsignedDst>(y);
+ const UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
+ // Subtraction is valid if either x and y have same sign, or (x-y) and x have
+ // the same sign.
+ if (std::is_signed<T>::value ? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) < 0 : x < y)
+ return false;
+ *result = static_cast<T>(uresult);
+ return true;
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedSubOp
+{};
+
+template <typename T, typename U>
+struct CheckedSubOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ if constexpr (CheckedSubFastOp<T, U>::is_supported)
+ return CheckedSubFastOp<T, U>::Do(x, y, result);
+
+ // Double the underlying type up to a full machine word.
+ using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ using Promotion =
+ typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
+ IntegerBitsPlusSign<intptr_t>::value),
+ typename BigEnoughPromotion<T, U>::type, FastPromotion>::type;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ if (BASE_NUMERICS_UNLIKELY(!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y)))
+ {
+ return false;
+ }
+
+ Promotion presult = {};
+ bool is_valid = true;
+ if constexpr (IsIntegerArithmeticSafe<Promotion, T, U>::value)
+ {
+ presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
+ }
+ else
+ {
+ is_valid =
+ CheckedSubImpl(static_cast<Promotion>(x), static_cast<Promotion>(y), &presult);
+ }
+ if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
+ return false;
+ *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+template <typename T>
+constexpr bool CheckedMulImpl(T x, T y, T *result)
+{
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ // Since the value of x*y is potentially undefined if we have a signed type,
+ // we compute it using the unsigned type of the same size.
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
+ const UnsignedDst ux = SafeUnsignedAbs(x);
+ const UnsignedDst uy = SafeUnsignedAbs(y);
+ const UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
+ const bool is_negative = std::is_signed<T>::value && static_cast<SignedDst>(x ^ y) < 0;
+ // We have a fast out for unsigned identity or zero on the second operand.
+ // After that it's an unsigned overflow check on the absolute value, with
+ // a +1 bound for a negative result.
+ if (uy > UnsignedDst(!std::is_signed<T>::value || is_negative) &&
+ ux > (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy)
+ return false;
+ *result = is_negative ? 0 - uresult : uresult;
+ return true;
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedMulOp
+{};
+
+template <typename T, typename U>
+struct CheckedMulOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ if constexpr (CheckedMulFastOp<T, U>::is_supported)
+ return CheckedMulFastOp<T, U>::Do(x, y, result);
+
+ using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ // Verify the destination type can hold the result (always true for 0).
+ if (BASE_NUMERICS_UNLIKELY((!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y)) &&
+ x && y))
+ {
+ return false;
+ }
+
+ Promotion presult = {};
+ bool is_valid = true;
+ if constexpr (CheckedMulFastOp<Promotion, Promotion>::is_supported)
+ {
+ // The fast op may be available with the promoted type.
+ is_valid = CheckedMulFastOp<Promotion, Promotion>::Do(x, y, &presult);
+ }
+ else if (IsIntegerArithmeticSafe<Promotion, T, U>::value)
+ {
+ presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
+ }
+ else
+ {
+ is_valid =
+ CheckedMulImpl(static_cast<Promotion>(x), static_cast<Promotion>(y), &presult);
+ }
+ if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
+ return false;
+ *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+// Division just requires a check for a zero denominator or an invalid negation
+// on signed min/-1.
+template <typename T, typename U, class Enable = void>
+struct CheckedDivOp
+{};
+
+template <typename T, typename U>
+struct CheckedDivOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ if (BASE_NUMERICS_UNLIKELY(!y))
+ return false;
+
+ // The overflow check can be compiled away if we don't have the exact
+ // combination of types needed to trigger this case.
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ if (BASE_NUMERICS_UNLIKELY(
+ (std::is_signed<T>::value && std::is_signed<U>::value &&
+ IsTypeInRangeForNumericType<T, Promotion>::value &&
+ static_cast<Promotion>(x) == std::numeric_limits<Promotion>::lowest() &&
+ y == static_cast<U>(-1))))
+ {
+ return false;
+ }
+
+ // This branch always compiles away if the above branch wasn't removed.
+ if (BASE_NUMERICS_UNLIKELY((!IsValueInRangeForNumericType<Promotion>(x) ||
+ !IsValueInRangeForNumericType<Promotion>(y)) &&
+ x))
+ {
+ return false;
+ }
+
+ const Promotion presult = Promotion(x) / Promotion(y);
+ if (!IsValueInRangeForNumericType<V>(presult))
+ return false;
+ *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedModOp
+{};
+
+template <typename T, typename U>
+struct CheckedModOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ if (BASE_NUMERICS_UNLIKELY(!y))
+ return false;
+
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ if (BASE_NUMERICS_UNLIKELY(
+ (std::is_signed<T>::value && std::is_signed<U>::value &&
+ IsTypeInRangeForNumericType<T, Promotion>::value &&
+ static_cast<Promotion>(x) == std::numeric_limits<Promotion>::lowest() &&
+ y == static_cast<U>(-1))))
+ {
+ *result = 0;
+ return true;
+ }
+
+ const Promotion presult = static_cast<Promotion>(x) % static_cast<Promotion>(y);
+ if (!IsValueInRangeForNumericType<V>(presult))
+ return false;
+        *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedLshOp
+{};
+
+// Left shift. Shifts less than 0 or greater than or equal to the number
+// of bits in the promoted type are undefined. Shifts of negative values
+// are undefined. Otherwise it is defined when the result fits.
+template <typename T, typename U>
+struct CheckedLshOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = T;
+ template <typename V>
+ static constexpr bool Do(T x, U shift, V *result)
+ {
+ // Disallow negative numbers and verify the shift is in bounds.
+ if (BASE_NUMERICS_LIKELY(!IsValueNegative(x) &&
+ as_unsigned(shift) < as_unsigned(std::numeric_limits<T>::digits)))
+ {
+ // Shift as unsigned to avoid undefined behavior.
+ *result = static_cast<V>(as_unsigned(x) << shift);
+ // If the shift can be reversed, we know it was valid.
+ return *result >> shift == x;
+ }
+
+ // Handle the legal corner-case of a full-width signed shift of zero.
+ if (!std::is_signed<T>::value || x ||
+ as_unsigned(shift) != as_unsigned(std::numeric_limits<T>::digits))
+ return false;
+ *result = 0;
+ return true;
+ }
+};
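+
+// Illustrative example (informal, for documentation only; not part of the
+// upstream sources): for int32_t, Do(1, 30) succeeds with result 1 << 30,
+// Do(1, 31) fails because a non-zero value shifted by the full value width
+// cannot be represented, and Do(0, 31) hits the legal corner case above and
+// stores 0.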
+
+template <typename T, typename U, class Enable = void>
+struct CheckedRshOp
+{};
+
+// Right shift. Shifts less than 0 or greater than or equal to the number
+// of bits in the promoted type are undefined. Otherwise, it is always defined,
+// but a right shift of a negative value is implementation-dependent.
+template <typename T, typename U>
+struct CheckedRshOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = T;
+ template <typename V>
+ static bool Do(T x, U shift, V *result)
+ {
+ // Use sign conversion to push negative values out of range.
+ if (BASE_NUMERICS_UNLIKELY(as_unsigned(shift) >= IntegerBitsPlusSign<T>::value))
+ {
+ return false;
+ }
+
+ const T tmp = x >> shift;
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAndOp
+{};
+
+// For simplicity we support only unsigned integer results.
+template <typename T, typename U>
+struct CheckedAndOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ const result_type tmp = static_cast<result_type>(x) & static_cast<result_type>(y);
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedOrOp
+{};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedOrOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ const result_type tmp = static_cast<result_type>(x) | static_cast<result_type>(y);
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedXorOp
+{};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedXorOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ const result_type tmp = static_cast<result_type>(x) ^ static_cast<result_type>(y);
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+// Max doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMaxOp
+{};
+
+template <typename T, typename U>
+struct CheckedMaxOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ const result_type tmp =
+ IsGreater<T, U>::Test(x, y) ? static_cast<result_type>(x) : static_cast<result_type>(y);
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+// Min doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMinOp
+{};
+
+template <typename T, typename U>
+struct CheckedMinOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<U>::value>::type>
+{
+ using result_type = typename LowestValuePromotion<T, U>::type;
+ template <typename V>
+ static constexpr bool Do(T x, U y, V *result)
+ {
+ const result_type tmp =
+ IsLess<T, U>::Test(x, y) ? static_cast<result_type>(x) : static_cast<result_type>(y);
+ if (!IsValueInRangeForNumericType<V>(tmp))
+ return false;
+ *result = static_cast<V>(tmp);
+ return true;
+ }
+};
+
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
+ template <typename T, typename U> \
+ struct Checked##NAME##Op<T, U, \
+ typename std::enable_if<std::is_floating_point<T>::value || \
+ std::is_floating_point<U>::value>::type> \
+ { \
+ using result_type = typename MaxExponentPromotion<T, U>::type; \
+ template <typename V> \
+ static constexpr bool Do(T x, U y, V *result) \
+ { \
+ using Promotion = typename MaxExponentPromotion<T, U>::type; \
+ const Promotion presult = x OP y; \
+ if (!IsValueInRangeForNumericType<V>(presult)) \
+ return false; \
+ *result = static_cast<V>(presult); \
+ return true; \
+ } \
+ };
+
+BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef BASE_FLOAT_ARITHMETIC_OPS
+
+// Floats carry around their validity state with them, but integers do not. So,
+// we wrap the underlying value in a specialization in order to hide that detail
+// and expose an interface via accessors.
+enum NumericRepresentation
+{
+ NUMERIC_INTEGER,
+ NUMERIC_FLOATING,
+ NUMERIC_UNKNOWN
+};
+
+template <typename NumericType>
+struct GetNumericRepresentation
+{
+ static const NumericRepresentation value =
+ std::is_integral<NumericType>::value
+ ? NUMERIC_INTEGER
+ : (std::is_floating_point<NumericType>::value ? NUMERIC_FLOATING : NUMERIC_UNKNOWN);
+};
+
+template <typename T, NumericRepresentation type = GetNumericRepresentation<T>::value>
+class CheckedNumericState
+{};
+
+// Integrals require quite a bit of additional housekeeping to manage state.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_INTEGER>
+{
+ public:
+ template <typename Src = int>
+ constexpr explicit CheckedNumericState(Src value = 0, bool is_valid = true)
+ : is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
+ value_(WellDefinedConversionOrZero(value, is_valid_))
+ {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ }
+
+ template <typename Src>
+ constexpr CheckedNumericState(const CheckedNumericState<Src> &rhs)
+ : CheckedNumericState(rhs.value(), rhs.is_valid())
+ {}
+
+ constexpr bool is_valid() const { return is_valid_; }
+
+ constexpr T value() const { return value_; }
+
+ private:
+ // Ensures that a type conversion does not trigger undefined behavior.
+ template <typename Src>
+ static constexpr T WellDefinedConversionOrZero(Src value, bool is_valid)
+ {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return (std::is_integral<SrcType>::value || is_valid) ? static_cast<T>(value) : 0;
+ }
+
+    // is_valid_ precedes value_ because member initializers in the constructors
+    // are evaluated in field order, and is_valid_ must be read when initializing
+    // value_.
+ bool is_valid_;
+ T value_;
+};
+
+// Floating points maintain their own validity, but need translation wrappers.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_FLOATING>
+{
+ public:
+ template <typename Src = double>
+ constexpr explicit CheckedNumericState(Src value = 0.0, bool is_valid = true)
+ : value_(
+ WellDefinedConversionOrNaN(value, is_valid && IsValueInRangeForNumericType<T>(value)))
+ {}
+
+ template <typename Src>
+ constexpr CheckedNumericState(const CheckedNumericState<Src> &rhs)
+ : CheckedNumericState(rhs.value(), rhs.is_valid())
+ {}
+
+ constexpr bool is_valid() const
+ {
+ // Written this way because std::isfinite is not reliably constexpr.
+ return MustTreatAsConstexpr(value_) ? value_ <= std::numeric_limits<T>::max() &&
+ value_ >= std::numeric_limits<T>::lowest()
+ : std::isfinite(value_);
+ }
+
+ constexpr T value() const { return value_; }
+
+ private:
+ // Ensures that a type conversion does not trigger undefined behavior.
+ template <typename Src>
+ static constexpr T WellDefinedConversionOrNaN(Src value, bool is_valid)
+ {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return (StaticDstRangeRelationToSrcRange<T, SrcType>::value == NUMERIC_RANGE_CONTAINED ||
+ is_valid)
+ ? static_cast<T>(value)
+ : std::numeric_limits<T>::quiet_NaN();
+ }
+
+ T value_;
+};
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_CHECKED_MATH_IMPL_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math.h
new file mode 100644
index 0000000000..33d2e4b233
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math.h
@@ -0,0 +1,270 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CLAMPED_MATH_H_
+#define BASE_NUMERICS_CLAMPED_MATH_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/clamped_math_impl.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+template <typename T>
+class ClampedNumeric
+{
+ static_assert(std::is_arithmetic<T>::value, "ClampedNumeric<T>: T must be a numeric type.");
+
+ public:
+ using type = T;
+
+ constexpr ClampedNumeric() : value_(0) {}
+
+ // Copy constructor.
+ template <typename Src>
+ constexpr ClampedNumeric(const ClampedNumeric<Src> &rhs) : value_(saturated_cast<T>(rhs.value_))
+ {}
+
+ template <typename Src>
+ friend class ClampedNumeric;
+
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to ClampedNumerics to make them easier to use.
+ template <typename Src>
+ constexpr ClampedNumeric(Src value) // NOLINT(runtime/explicit)
+ : value_(saturated_cast<T>(value))
+ {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ }
+
+ // This is not an explicit constructor because we want a seamless conversion
+ // from StrictNumeric types.
+ template <typename Src>
+ constexpr ClampedNumeric(StrictNumeric<Src> value) // NOLINT(runtime/explicit)
+ : value_(saturated_cast<T>(static_cast<Src>(value)))
+ {}
+
+ // Returns a ClampedNumeric of the specified type, cast from the current
+ // ClampedNumeric, and saturated to the destination type.
+ template <typename Dst>
+ constexpr ClampedNumeric<typename UnderlyingType<Dst>::type> Cast() const
+ {
+ return *this;
+ }
+
+ // Prototypes for the supported arithmetic operator overloads.
+ template <typename Src>
+ constexpr ClampedNumeric &operator+=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator-=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator*=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator/=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator%=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator<<=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator>>=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator&=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator|=(const Src rhs);
+ template <typename Src>
+ constexpr ClampedNumeric &operator^=(const Src rhs);
+
+ constexpr ClampedNumeric operator-() const
+ {
+ // The negation of two's complement int min is int min, so that's the
+ // only overflow case where we will saturate.
+ return ClampedNumeric<T>(SaturatedNegWrapper(value_));
+ }
+
+ constexpr ClampedNumeric operator~() const
+ {
+ return ClampedNumeric<decltype(InvertWrapper(T()))>(InvertWrapper(value_));
+ }
+
+ constexpr ClampedNumeric Abs() const
+ {
+ // The negation of two's complement int min is int min, so that's the
+ // only overflow case where we will saturate.
+ return ClampedNumeric<T>(SaturatedAbsWrapper(value_));
+ }
+
+ template <typename U>
+ constexpr ClampedNumeric<typename MathWrapper<ClampedMaxOp, T, U>::type> Max(const U rhs) const
+ {
+ using result_type = typename MathWrapper<ClampedMaxOp, T, U>::type;
+ return ClampedNumeric<result_type>(ClampedMaxOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
+ }
+
+ template <typename U>
+ constexpr ClampedNumeric<typename MathWrapper<ClampedMinOp, T, U>::type> Min(const U rhs) const
+ {
+ using result_type = typename MathWrapper<ClampedMinOp, T, U>::type;
+ return ClampedNumeric<result_type>(ClampedMinOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
+ }
+
+ // This function is available only for integral types. It returns an unsigned
+ // integer of the same width as the source type, containing the absolute value
+ // of the source, and properly handling signed min.
+ constexpr ClampedNumeric<typename UnsignedOrFloatForSize<T>::type> UnsignedAbs() const
+ {
+ return ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>(SafeUnsignedAbs(value_));
+ }
+
+ constexpr ClampedNumeric &operator++()
+ {
+ *this += 1;
+ return *this;
+ }
+
+ constexpr ClampedNumeric operator++(int)
+ {
+ ClampedNumeric value = *this;
+ *this += 1;
+ return value;
+ }
+
+ constexpr ClampedNumeric &operator--()
+ {
+ *this -= 1;
+ return *this;
+ }
+
+ constexpr ClampedNumeric operator--(int)
+ {
+ ClampedNumeric value = *this;
+ *this -= 1;
+ return value;
+ }
+
+ // These perform the actual math operations on the ClampedNumerics.
+ // Binary arithmetic operations.
+ template <template <typename, typename, typename> class M, typename L, typename R>
+ static constexpr ClampedNumeric MathOp(const L lhs, const R rhs)
+ {
+ using Math = typename MathWrapper<M, L, R>::math;
+ return ClampedNumeric<T>(
+ Math::template Do<T>(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs)));
+ }
+
+ // Assignment arithmetic operations.
+ template <template <typename, typename, typename> class M, typename R>
+ constexpr ClampedNumeric &MathOp(const R rhs)
+ {
+ using Math = typename MathWrapper<M, T, R>::math;
+ *this = ClampedNumeric<T>(Math::template Do<T>(value_, Wrapper<R>::value(rhs)));
+ return *this;
+ }
+
+ template <typename Dst>
+ constexpr operator Dst() const
+ {
+ return saturated_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
+ }
+
+ // This method extracts the raw integer value without saturating it to the
+ // destination type as the conversion operator does. This is useful when
+ // e.g. assigning to an auto type or passing as a deduced template parameter.
+ constexpr T RawValue() const { return value_; }
+
+ private:
+ T value_;
+
+ // These wrappers allow us to handle state the same way for both
+ // ClampedNumeric and POD arithmetic types.
+ template <typename Src>
+ struct Wrapper
+ {
+ static constexpr Src value(Src value)
+ {
+ return static_cast<typename UnderlyingType<Src>::type>(value);
+ }
+ };
+};
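+
+// Illustrative usage (informal, for documentation only; not part of the
+// upstream sources):
+//   ClampedNumeric<uint8_t> c = 250;
+//   c += 10;                                        // c is now 255, not 4.
+//   int8_t d = ClampedNumeric<int8_t>(-100) - 100;  // d == -128 (saturated).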
+
+// Convenience wrapper to return a new ClampedNumeric from the provided
+// arithmetic or ClampedNumeric type.
+template <typename T>
+constexpr ClampedNumeric<typename UnderlyingType<T>::type> MakeClampedNum(const T value)
+{
+ return value;
+}
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream &operator<<(std::ostream &os, const ClampedNumeric<T> &value)
+{
+ os << static_cast<T>(value);
+ return os;
+}
+#endif
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M, typename L, typename R>
+constexpr ClampedNumeric<typename MathWrapper<M, L, R>::type> ClampMathOp(const L lhs, const R rhs)
+{
+ using Math = typename MathWrapper<M, L, R>::math;
+ return ClampedNumeric<typename Math::result_type>::template MathOp<M>(lhs, rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+template <template <typename, typename, typename> class M, typename L, typename R, typename... Args>
+constexpr auto ClampMathOp(const L lhs, const R rhs, const Args... args)
+{
+ return ClampMathOp<M>(ClampMathOp<M>(lhs, rhs), args...);
+}
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Add, +, +=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Sub, -, -=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mul, *, *=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Div, /, /=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mod, %, %=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Lsh, <<, <<=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Rsh, >>, >>=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, And, &, &=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Or, |, |=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Xor, ^, ^=)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Max)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Min)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLess, <)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLessOrEqual, <=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreater, >)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreaterOrEqual, >=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsEqual, ==)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsNotEqual, !=)
+
+} // namespace internal
+
+using internal::ClampAdd;
+using internal::ClampAnd;
+using internal::ClampDiv;
+using internal::ClampedNumeric;
+using internal::ClampLsh;
+using internal::ClampMax;
+using internal::ClampMin;
+using internal::ClampMod;
+using internal::ClampMul;
+using internal::ClampOr;
+using internal::ClampRsh;
+using internal::ClampSub;
+using internal::ClampXor;
+using internal::MakeClampedNum;
+
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_CLAMPED_MATH_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math_impl.h
new file mode 100644
index 0000000000..198723eeaa
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/clamped_math_impl.h
@@ -0,0 +1,368 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
+#define BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/checked_math.h"
+#include "anglebase/numerics/safe_conversions.h"
+#include "anglebase/numerics/safe_math_shared_impl.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value>::type * =
+ nullptr>
+constexpr T SaturatedNegWrapper(T value)
+{
+ return MustTreatAsConstexpr(value) || !ClampedNegFastOp<T>::is_supported
+ ? (NegateWrapper(value) != std::numeric_limits<T>::lowest()
+ ? NegateWrapper(value)
+ : std::numeric_limits<T>::max())
+ : ClampedNegFastOp<T>::Do(value);
+}
+
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value>::type * =
+ nullptr>
+constexpr T SaturatedNegWrapper(T value)
+{
+ return T(0);
+}
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value>::type * = nullptr>
+constexpr T SaturatedNegWrapper(T value)
+{
+ return -value;
+}
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value>::type * = nullptr>
+constexpr T SaturatedAbsWrapper(T value)
+{
+ // The calculation below is a static identity for unsigned types, but for
+ // signed integer types it provides a non-branching, saturated absolute value.
+ // This works because SafeUnsignedAbs() returns an unsigned type, which can
+ // represent the absolute value of all negative numbers of an equal-width
+ // integer type. The call to IsValueNegative() then detects overflow in the
+ // special case of numeric_limits<T>::min(), by evaluating the bit pattern as
+ // a signed integer value. If it is the overflow case, we end up subtracting
+ // one from the unsigned result, thus saturating to numeric_limits<T>::max().
+ return static_cast<T>(SafeUnsignedAbs(value) - IsValueNegative<T>(SafeUnsignedAbs(value)));
+}
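+
+// Illustrative example (informal, for documentation only; not part of the
+// upstream sources): for int8_t{-128}, SafeUnsignedAbs() yields 128; read back
+// as a signed int8_t that bit pattern is negative, so IsValueNegative()
+// contributes 1 and the result saturates to 127. For int8_t{-5} the result is
+// simply 5.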
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value>::type * = nullptr>
+constexpr T SaturatedAbsWrapper(T value)
+{
+ return value < 0 ? -value : value;
+}
+
+template <typename T, typename U, class Enable = void>
+struct ClampedAddOp
+{};
+
+template <typename T, typename U>
+struct ClampedAddOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ if (ClampedAddFastOp<T, U>::is_supported)
+ return ClampedAddFastOp<T, U>::template Do<V>(x, y);
+
+ static_assert(
+ std::is_same<V, result_type>::value || IsTypeInRangeForNumericType<U, V>::value,
+ "The saturation result cannot be determined from the "
+ "provided types.");
+ const V saturated = CommonMaxOrMin<V>(IsValueNegative(y));
+ V result = {};
+ return BASE_NUMERICS_LIKELY((CheckedAddOp<T, U>::Do(x, y, &result))) ? result : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedSubOp
+{};
+
+template <typename T, typename U>
+struct ClampedSubOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ if constexpr (ClampedSubFastOp<T, U>::is_supported)
+ return ClampedSubFastOp<T, U>::template Do<V>(x, y);
+
+ static_assert(
+ std::is_same<V, result_type>::value || IsTypeInRangeForNumericType<U, V>::value,
+ "The saturation result cannot be determined from the "
+ "provided types.");
+ const V saturated = CommonMaxOrMin<V>(!IsValueNegative(y));
+ V result = {};
+ return BASE_NUMERICS_LIKELY((CheckedSubOp<T, U>::Do(x, y, &result))) ? result : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMulOp
+{};
+
+template <typename T, typename U>
+struct ClampedMulOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ if constexpr (ClampedMulFastOp<T, U>::is_supported)
+ return ClampedMulFastOp<T, U>::template Do<V>(x, y);
+
+ V result = {};
+ const V saturated = CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
+ return BASE_NUMERICS_LIKELY((CheckedMulOp<T, U>::Do(x, y, &result))) ? result : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedDivOp
+{};
+
+template <typename T, typename U>
+struct ClampedDivOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ V result = {};
+ if (BASE_NUMERICS_LIKELY((CheckedDivOp<T, U>::Do(x, y, &result))))
+ return result;
+ // Saturation goes to max, min, or NaN (if x is zero).
+ return x ? CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y))
+ : SaturationDefaultLimits<V>::NaN();
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedModOp
+{};
+
+template <typename T, typename U>
+struct ClampedModOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ V result = {};
+ return BASE_NUMERICS_LIKELY((CheckedModOp<T, U>::Do(x, y, &result))) ? result : x;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedLshOp
+{};
+
+// Left shift. Non-zero values saturate in the direction of the sign. A zero
+// shifted by any value always results in zero.
+template <typename T, typename U>
+struct ClampedLshOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = T;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U shift)
+ {
+ static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+ if (BASE_NUMERICS_LIKELY(shift < std::numeric_limits<T>::digits))
+ {
+ // Shift as unsigned to avoid undefined behavior.
+ V result = static_cast<V>(as_unsigned(x) << shift);
+ // If the shift can be reversed, we know it was valid.
+ if (BASE_NUMERICS_LIKELY(result >> shift == x))
+ return result;
+ }
+ return x ? CommonMaxOrMin<V>(IsValueNegative(x)) : 0;
+ }
+};
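+
+// Illustrative example (informal, for documentation only; not part of the
+// upstream sources): for int8_t, Do(int8_t{1}, 6u) yields 64, while
+// Do(int8_t{1}, 7u) would shift into the sign bit and therefore saturates to
+// 127; Do(int8_t{0}, 7u) yields 0.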
+
+template <typename T, typename U, class Enable = void>
+struct ClampedRshOp
+{};
+
+// Right shift. Negative values saturate to -1. Positive or 0 saturates to 0.
+template <typename T, typename U>
+struct ClampedRshOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type = T;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U shift)
+ {
+ static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+ // Signed right shift is odd, because it saturates to -1 or 0.
+ const V saturated = as_unsigned(V(0)) - IsValueNegative(x);
+ return BASE_NUMERICS_LIKELY(shift < IntegerBitsPlusSign<T>::value)
+ ? saturated_cast<V>(x >> shift)
+ : saturated;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedAndOp
+{};
+
+template <typename T, typename U>
+struct ClampedAndOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr V Do(T x, U y)
+ {
+ return static_cast<result_type>(x) & static_cast<result_type>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedOrOp
+{};
+
+// For simplicity we promote to unsigned integers.
+template <typename T, typename U>
+struct ClampedOrOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr V Do(T x, U y)
+ {
+ return static_cast<result_type>(x) | static_cast<result_type>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedXorOp
+{};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct ClampedXorOp<
+ T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value>::type>
+{
+ using result_type =
+ typename std::make_unsigned<typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V>
+ static constexpr V Do(T x, U y)
+ {
+ return static_cast<result_type>(x) ^ static_cast<result_type>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMaxOp
+{};
+
+template <typename T, typename U>
+struct ClampedMaxOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<U>::value>::type>
+{
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ return IsGreater<T, U>::Test(x, y) ? saturated_cast<V>(x) : saturated_cast<V>(y);
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMinOp
+{};
+
+template <typename T, typename U>
+struct ClampedMinOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<U>::value>::type>
+{
+ using result_type = typename LowestValuePromotion<T, U>::type;
+ template <typename V = result_type>
+ static constexpr V Do(T x, U y)
+ {
+ return IsLess<T, U>::Test(x, y) ? saturated_cast<V>(x) : saturated_cast<V>(y);
+ }
+};
+
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
+ template <typename T, typename U> \
+ struct Clamped##NAME##Op<T, U, \
+ typename std::enable_if<std::is_floating_point<T>::value || \
+ std::is_floating_point<U>::value>::type> \
+ { \
+ using result_type = typename MaxExponentPromotion<T, U>::type; \
+ template <typename V = result_type> \
+ static constexpr V Do(T x, U y) \
+ { \
+ return saturated_cast<V>(x OP y); \
+ } \
+ };
+
+BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef BASE_FLOAT_ARITHMETIC_OPS
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/math_constants.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/math_constants.h
new file mode 100644
index 0000000000..385ce3970f
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/math_constants.h
@@ -0,0 +1,20 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_MATH_CONSTANTS_H_
+#define BASE_NUMERICS_MATH_CONSTANTS_H_
+
+namespace base
+{
+
+constexpr double kPiDouble = 3.14159265358979323846;
+constexpr float kPiFloat = 3.14159265358979323846f;
+
+// The mean acceleration due to gravity on Earth in m/s^2.
+constexpr double kMeanGravityDouble = 9.80665;
+constexpr float kMeanGravityFloat = 9.80665f;
+
+} // namespace base
+
+#endif // BASE_NUMERICS_MATH_CONSTANTS_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/ranges.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/ranges.h
new file mode 100644
index 0000000000..55a5a295af
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/ranges.h
@@ -0,0 +1,39 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_RANGES_H_
+#define BASE_NUMERICS_RANGES_H_
+
+#include <algorithm>
+#include <cmath>
+
+namespace base
+{
+
+// DO NOT USE THIS FUNCTION. IT IS DUE TO BE REMOVED. https://crbug.com/1231569
+// Please use base::clamp() from base/cxx17_backports.h instead.
+//
+// This function, unlike base::clamp(), does not check if `min` is greater than
+// `max`, and returns a bogus answer if it is. Please migrate all code that
+// calls this function to use base::clamp() instead.
+//
+// If, for some reason the broken behavior is required, please re-create this
+// min/max nesting inline in the host code and explain with a comment why it
+// is needed.
+template <class T>
+constexpr const T &BrokenClampThatShouldNotBeUsed(const T &value, const T &min, const T &max)
+{
+ return std::min(std::max(value, min), max);
+}
+
+template <typename T>
+constexpr bool IsApproximatelyEqual(T lhs, T rhs, T tolerance)
+{
+ static_assert(std::is_arithmetic<T>::value, "Argument must be arithmetic");
+ return std::abs(rhs - lhs) <= tolerance;
+}
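+
+// Illustrative example (informal, for documentation only; not part of the
+// upstream sources): IsApproximatelyEqual(100.0, 100.4, 0.5) is true because
+// |100.4 - 100.0| is within the tolerance, while
+// IsApproximatelyEqual(100.0, 101.0, 0.5) is false.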
+
+} // namespace base
+
+#endif // BASE_NUMERICS_RANGES_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions.h
new file mode 100644
index 0000000000..3aa80c7ed0
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions.h
@@ -0,0 +1,403 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <cmath>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions_impl.h"
+
+#if defined(__ARMEL__) && !defined(__native_client__)
+# include "anglebase/numerics/safe_conversions_arm_impl.h"
+# define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (1)
+#else
+# define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (0)
+#endif
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+# include <ostream>
+#endif
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+#if !BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp
+{
+ static constexpr bool is_supported = false;
+ static constexpr Dst Do(Src)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<Dst>();
+ }
+};
+#endif // BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+#undef BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+
+// The following templates special-case a few specific integer conversions
+// where we can eke out better performance than generic range checking.
+template <typename Dst, typename Src, typename Enable = void>
+struct IsValueInRangeFastOp
+{
+ static constexpr bool is_supported = false;
+ static constexpr bool Do(Src value)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+// Signed to signed range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+ std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value>::type>
+{
+ static constexpr bool is_supported = true;
+
+ static constexpr bool Do(Src value)
+ {
+ // Just downcast to the smaller type, sign extend it back to the original
+ // type, and then see if it matches the original value.
+ return value == static_cast<Dst>(value);
+ }
+};
+
+// Signed to unsigned range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+ !std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value>::type>
+{
+ static constexpr bool is_supported = true;
+
+ static constexpr bool Do(Src value)
+ {
+ // We cast a signed as unsigned to overflow negative values to the top,
+ // then compare against whichever maximum is smaller, as our upper bound.
+ return as_unsigned(value) <= as_unsigned(CommonMax<Src, Dst>());
+ }
+};
+
+// Convenience function that returns true if the supplied value is in range
+// for the destination type.
+template <typename Dst, typename Src>
+constexpr bool IsValueInRangeForNumericType(Src value)
+{
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return internal::IsValueInRangeFastOp<Dst, SrcType>::is_supported
+ ? internal::IsValueInRangeFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
+ : internal::DstRangeRelationToSrcRange<Dst>(static_cast<SrcType>(value)).IsValid();
+}
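+
+// Illustrative example (informal, for documentation only; not part of the
+// upstream sources): IsValueInRangeForNumericType<uint8_t>(-1) is false,
+// IsValueInRangeForNumericType<uint8_t>(255) is true, and
+// IsValueInRangeForNumericType<int32_t>(2147483648.0) is false because 2^31
+// exceeds the destination maximum of 2^31 - 1.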
+
+// checked_cast<> is analogous to static_cast<> for numeric types,
+// except that it CHECKs that the specified numeric conversion will not
+// overflow or underflow. NaN source will always trigger a CHECK.
+template <typename Dst, class CheckHandler = internal::CheckOnFailure, typename Src>
+constexpr Dst checked_cast(Src value)
+{
+    // When evaluated in a constexpr context this produces a compile-time error
+    // if the conversion can be determined to fail; otherwise it will CHECK at
+    // runtime.
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return BASE_NUMERICS_LIKELY((IsValueInRangeForNumericType<Dst>(value)))
+ ? static_cast<Dst>(static_cast<SrcType>(value))
+ : CheckHandler::template HandleFailure<Dst>();
+}
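+
+// Illustrative usage (informal, for documentation only; not part of the
+// upstream sources):
+//   uint8_t a = checked_cast<uint8_t>(200);  // OK, a == 200.
+//   uint8_t b = checked_cast<uint8_t>(300);  // Out of range: traps at runtime.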
+
+// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
+// You may provide your own limits (e.g. to saturated_cast) so long as you
+// implement all of the static constexpr member functions in the class below.
+template <typename T>
+struct SaturationDefaultLimits : public std::numeric_limits<T>
+{
+ static constexpr T NaN()
+ {
+ return std::numeric_limits<T>::has_quiet_NaN ? std::numeric_limits<T>::quiet_NaN() : T();
+ }
+ using std::numeric_limits<T>::max;
+ static constexpr T Overflow()
+ {
+ return std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity()
+ : std::numeric_limits<T>::max();
+ }
+ using std::numeric_limits<T>::lowest;
+ static constexpr T Underflow()
+ {
+ return std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity() * -1
+ : std::numeric_limits<T>::lowest();
+ }
+};
+
+template <typename Dst, template <typename> class S, typename Src>
+constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint)
+{
+ // For some reason clang generates much better code when the branch is
+ // structured exactly this way, rather than a sequence of checks.
+ return !constraint.IsOverflowFlagSet()
+ ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value) : S<Dst>::Underflow())
+ // Skip this check for integral Src, which cannot be NaN.
+ : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
+ ? S<Dst>::Overflow()
+ : S<Dst>::NaN());
+}
+
+// We can reduce the number of conditions and get slightly better performance
+// for normal signed and unsigned integer ranges. And in the specific case of
+// Arm, we can use the optimized saturation instructions.
+template <typename Dst, typename Src, typename Enable = void>
+struct SaturateFastOp
+{
+ static constexpr bool is_supported = false;
+ static constexpr Dst Do(Src value)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<Dst>();
+ }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<std::is_integral<Src>::value && std::is_integral<Dst>::value &&
+ SaturateFastAsmOp<Dst, Src>::is_supported>::type>
+{
+ static constexpr bool is_supported = true;
+ static constexpr Dst Do(Src value) { return SaturateFastAsmOp<Dst, Src>::Do(value); }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<
+ Dst,
+ Src,
+ typename std::enable_if<std::is_integral<Src>::value && std::is_integral<Dst>::value &&
+ !SaturateFastAsmOp<Dst, Src>::is_supported>::type>
+{
+ static constexpr bool is_supported = true;
+ static constexpr Dst Do(Src value)
+ {
+ // The exact order of the following is structured to hit the correct
+ // optimization heuristics across compilers. Do not change without
+ // checking the emitted code.
+ const Dst saturated = CommonMaxOrMin<Dst, Src>(
+ IsMaxInRangeForNumericType<Dst, Src>() ||
+ (!IsMinInRangeForNumericType<Dst, Src>() && IsValueNegative(value)));
+ return BASE_NUMERICS_LIKELY(IsValueInRangeForNumericType<Dst>(value))
+ ? static_cast<Dst>(value)
+ : saturated;
+ }
+};
+
+// saturated_cast<> is analogous to static_cast<> for numeric types, except
+// that the specified numeric conversion will saturate by default rather than
+// overflow or underflow, and NaN assignment to an integral will return 0.
+// All boundary condition behaviors can be overridden with a custom handler.
+template <typename Dst,
+ template <typename> class SaturationHandler = SaturationDefaultLimits,
+ typename Src>
+constexpr Dst saturated_cast(Src value)
+{
+ using SrcType = typename UnderlyingType<Src>::type;
+ return !IsCompileTimeConstant(value) && SaturateFastOp<Dst, SrcType>::is_supported &&
+ std::is_same<SaturationHandler<Dst>, SaturationDefaultLimits<Dst>>::value
+ ? SaturateFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
+ : saturated_cast_impl<Dst, SaturationHandler, SrcType>(
+ static_cast<SrcType>(value),
+ DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(
+ static_cast<SrcType>(value)));
+}
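+
+// Illustrative usage (informal, for documentation only; not part of the
+// upstream sources):
+//   saturated_cast<uint8_t>(300)      == 255  (clamped to the destination max)
+//   saturated_cast<uint8_t>(-5)       == 0    (clamped to the destination lowest)
+//   saturated_cast<int>(std::nan("")) == 0    (NaN maps to 0 for integral types)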
+
+// strict_cast<> is analogous to static_cast<> for numeric types, except that
+// it will cause a compile failure if the destination type is not large enough
+// to contain any value in the source type. It performs no runtime checking.
+template <typename Dst, typename Src>
+constexpr Dst strict_cast(Src value)
+{
+ using SrcType = typename UnderlyingType<Src>::type;
+ static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // Alternatively, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value == NUMERIC_RANGE_CONTAINED,
+ "The source type is out of range for the destination type. "
+ "Please see strict_cast<> comments for more information.");
+
+ return static_cast<Dst>(static_cast<SrcType>(value));
+}
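+
+// Illustrative usage (informal, for documentation only; not part of the
+// upstream sources):
+//   int64_t wide   = strict_cast<int64_t>(int32_t{42});  // OK: range contained.
+//   int32_t narrow = strict_cast<int32_t>(int64_t{42});  // Compile-time error.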
+
+// Some wrappers to statically check that a type is in range.
+template <typename Dst, typename Src, class Enable = void>
+struct IsNumericRangeContained
+{
+ static constexpr bool value = false;
+};
+
+template <typename Dst, typename Src>
+struct IsNumericRangeContained<
+ Dst,
+ Src,
+ typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
+ ArithmeticOrUnderlyingEnum<Src>::value>::type>
+{
+ static constexpr bool value =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value == NUMERIC_RANGE_CONTAINED;
+};
+
+// StrictNumeric implements compile time range checking between numeric types by
+// wrapping assignment operations in a strict_cast. This class is intended to be
+// used for function arguments and return types, to ensure the destination type
+// can always contain the source type. This is essentially the same as enforcing
+// -Wconversion in gcc and C4302 warnings on MSVC, but it can be applied
+// incrementally at API boundaries, making it easier to convert code so that it
+// compiles cleanly with truncation warnings enabled.
+// This template should introduce no runtime overhead, but it also provides no
+// runtime checking of any of the associated mathematical operations. Use
+// CheckedNumeric for runtime range checks of the actual value being assigned.
+template <typename T>
+class StrictNumeric
+{
+ public:
+ using type = T;
+
+ constexpr StrictNumeric() : value_(0) {}
+
+ // Copy constructor.
+ template <typename Src>
+ constexpr StrictNumeric(const StrictNumeric<Src> &rhs) : value_(strict_cast<T>(rhs.value_))
+ {}
+
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to StrictNumerics to make them easier to use.
+ template <typename Src>
+ constexpr StrictNumeric(Src value) // NOLINT(runtime/explicit)
+ : value_(strict_cast<T>(value))
+ {}
+
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // If you're assigning from a CheckedNumeric<> class, you may be able to use
+ // the AssignIfValid() member function, specify a narrower destination type to
+ // the member value functions (e.g. val.template ValueOrDie<Dst>()), use one
+ // of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
+ // If you've encountered an _ambiguous overload_ you can use a static_cast<>
+ // to explicitly cast the result to the destination type.
+ // If none of that works, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ template <typename Dst,
+ typename std::enable_if<IsNumericRangeContained<Dst, T>::value>::type * = nullptr>
+ constexpr operator Dst() const
+ {
+ return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
+ }
+
+ private:
+ const T value_;
+};
+
+// Convenience wrapper that returns a StrictNumeric from the provided arithmetic type.
+template <typename T>
+constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(const T value)
+{
+ return value;
+}
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream &operator<<(std::ostream &os, const StrictNumeric<T> &value)
+{
+ os << static_cast<T>(value);
+ return os;
+}
+#endif
+
+#define BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP) \
+ template <typename L, typename R, \
+ typename std::enable_if<internal::Is##CLASS##Op<L, R>::value>::type * = nullptr> \
+ constexpr bool operator OP(const L lhs, const R rhs) \
+ { \
+ return SafeCompare<NAME, typename UnderlyingType<L>::type, \
+ typename UnderlyingType<R>::type>(lhs, rhs); \
+ }
+
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLess, <)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLessOrEqual, <=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreater, >)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreaterOrEqual, >=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsEqual, ==)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsNotEqual, !=)
+
+} // namespace internal
+
+using internal::as_signed;
+using internal::as_unsigned;
+using internal::checked_cast;
+using internal::IsTypeInRangeForNumericType;
+using internal::IsValueInRangeForNumericType;
+using internal::IsValueNegative;
+using internal::MakeStrictNum;
+using internal::SafeUnsignedAbs;
+using internal::saturated_cast;
+using internal::strict_cast;
+using internal::StrictNumeric;
+
+// Explicitly make a shorter size_t alias for convenience.
+using SizeT = StrictNumeric<size_t>;
+
+// floating -> integral conversions that saturate and thus can actually return
+// an integral type. In most cases, these should be preferred over the std::
+// versions.
+template <
+ typename Dst = int,
+ typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value && std::is_floating_point<Src>::value>>
+Dst ClampFloor(Src value)
+{
+ return saturated_cast<Dst>(std::floor(value));
+}
+template <
+ typename Dst = int,
+ typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value && std::is_floating_point<Src>::value>>
+Dst ClampCeil(Src value)
+{
+ return saturated_cast<Dst>(std::ceil(value));
+}
+template <
+ typename Dst = int,
+ typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value && std::is_floating_point<Src>::value>>
+Dst ClampRound(Src value)
+{
+ const Src rounded = (value >= 0.0f) ? std::floor(value + 0.5f) : std::ceil(value - 0.5f);
+ return saturated_cast<Dst>(rounded);
+}
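+
+// Illustrative usage (informal, for documentation only; not part of the
+// upstream sources):
+//   ClampRound(2.5)       == 3
+//   ClampRound(-2.5)      == -3
+//   ClampFloor<int>(1e20) == std::numeric_limits<int>::max()  (saturated)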
+
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_CONVERSIONS_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_arm_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_arm_impl.h
new file mode 100644
index 0000000000..74e5bcc0c6
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_arm_impl.h
@@ -0,0 +1,60 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions_impl.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+// Fast saturation to a destination type.
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp
+{
+ static constexpr bool is_supported =
+ std::is_signed<Src>::value && std::is_integral<Dst>::value &&
+ std::is_integral<Src>::value &&
+ IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
+ IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value;
+
+ __attribute__((always_inline)) static Dst Do(Src value)
+ {
+ int32_t src = value;
+ typename std::conditional<std::is_signed<Dst>::value, int32_t, uint32_t>::type result;
+ if (std::is_signed<Dst>::value)
+ {
+ asm("ssat %[dst], %[shift], %[src]"
+ : [dst] "=r"(result)
+ : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
+ ? IntegerBitsPlusSign<Dst>::value
+ : 32));
+ }
+ else
+ {
+ asm("usat %[dst], %[shift], %[src]"
+ : [dst] "=r"(result)
+ : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value < 32
+ ? IntegerBitsPlusSign<Dst>::value
+ : 31));
+ }
+ return static_cast<Dst>(result);
+ }
+};
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_impl.h
new file mode 100644
index 0000000000..7d0f031754
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_impl.h
@@ -0,0 +1,893 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+
+#include <stdint.h>
+
+#include <limits>
+#include <type_traits>
+
+#if defined(__GNUC__) || defined(__clang__)
+# define BASE_NUMERICS_LIKELY(x) __builtin_expect(!!(x), 1)
+# define BASE_NUMERICS_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+# define BASE_NUMERICS_LIKELY(x) (x)
+# define BASE_NUMERICS_UNLIKELY(x) (x)
+#endif
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+// The std library doesn't provide a binary max_exponent for integers; however,
+// we can compute an analog using std::numeric_limits<>::digits.
+template <typename NumericType>
+struct MaxExponent
+{
+ static const int value = std::is_floating_point<NumericType>::value
+ ? std::numeric_limits<NumericType>::max_exponent
+ : std::numeric_limits<NumericType>::digits + 1;
+};
+
+// The number of bits (including the sign) in an integer. Eliminates sizeof
+// hacks.
+template <typename NumericType>
+struct IntegerBitsPlusSign
+{
+ static const int value =
+ std::numeric_limits<NumericType>::digits + std::is_signed<NumericType>::value;
+};
+
+// Helper templates for integer manipulations.
+
+template <typename Integer>
+struct PositionOfSignBit
+{
+ static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
+};
+
+// Determines if a numeric value is negative without triggering compiler
+// warnings for comparisons such as: unsigned(value) < 0.
+template <typename T, typename std::enable_if<std::is_signed<T>::value>::type * = nullptr>
+constexpr bool IsValueNegative(T value)
+{
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return value < 0;
+}
+
+template <typename T, typename std::enable_if<!std::is_signed<T>::value>::type * = nullptr>
+constexpr bool IsValueNegative(T)
+{
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return false;
+}
+
+// This performs a fast negation, returning a signed value. It works on unsigned
+// arguments, but probably doesn't do what you want for any unsigned value
+// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
+template <typename T>
+constexpr typename std::make_signed<T>::type ConditionalNegate(T x, bool is_negative)
+{
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using SignedT = typename std::make_signed<T>::type;
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return static_cast<SignedT>((static_cast<UnsignedT>(x) ^ -SignedT(is_negative)) + is_negative);
+}
+
+// This performs a safe, absolute value via unsigned overflow.
+template <typename T>
+constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value)
+{
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return IsValueNegative(value) ? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
+ : static_cast<UnsignedT>(value);
+}
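
For a sense of why SafeUnsignedAbs exists, here is a minimal sketch, assuming the header is reachable on the include path as "anglebase/numerics/safe_conversions_impl.h": negating the most negative int through unsigned arithmetic is well defined, whereas the naive signed negation is not.

#include <cstdint>
#include <iostream>
#include <limits>

#include "anglebase/numerics/safe_conversions_impl.h"

int main()
{
    using angle::base::internal::SafeUnsignedAbs;
    const int32_t most_negative = std::numeric_limits<int32_t>::min();
    // std::abs(most_negative) would overflow; SafeUnsignedAbs computes the
    // magnitude through unsigned arithmetic, which is well defined.
    const uint32_t magnitude = SafeUnsignedAbs(most_negative);
    std::cout << magnitude << "\n";  // 2147483648
    return 0;
}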
+
+// This allows us to switch paths on known compile-time constants.
+#if defined(__clang__) || defined(__GNUC__)
+constexpr bool CanDetectCompileTimeConstant()
+{
+ return true;
+}
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T v)
+{
+ return __builtin_constant_p(v);
+}
+#else
+constexpr bool CanDetectCompileTimeConstant()
+{
+ return false;
+}
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T)
+{
+ return false;
+}
+#endif
+template <typename T>
+constexpr bool MustTreatAsConstexpr(const T v)
+{
+ // Either we can't detect a compile-time constant, and must always use the
+ // constexpr path, or we know we have a compile-time constant.
+ return !CanDetectCompileTimeConstant() || IsCompileTimeConstant(v);
+}
+
+// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
+// Also used in a constexpr template to trigger a compilation failure on
+// an error condition.
+struct CheckOnFailure
+{
+ template <typename T>
+ static T HandleFailure()
+ {
+#if defined(_MSC_VER)
+ __debugbreak();
+#elif defined(__GNUC__) || defined(__clang__)
+ __builtin_trap();
+#else
+ ((void)(*(volatile char *)0 = 0));
+#endif
+ return T();
+ }
+};
+
+enum IntegerRepresentation
+{
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED
+};
+
+// A range for a given numeric Src type is contained for a given numeric Dst
+// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
+// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
+// We implement this as template specializations rather than simple static
+// comparisons to ensure type correctness in our comparisons.
+enum NumericRangeRepresentation
+{
+ NUMERIC_RANGE_NOT_CONTAINED,
+ NUMERIC_RANGE_CONTAINED
+};
+
+// Helper templates to statically determine if our destination type can contain
+// maximum and minimum values represented by the source type.
+
+template <
+ typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED>
+struct StaticDstRangeRelationToSrcRange;
+
+// Same sign: Dst is guaranteed to contain Src only if its range is equal or
+// larger.
+template <typename Dst, typename Src, IntegerRepresentation Sign>
+struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign>
+{
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value >= MaxExponent<Src>::value ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Unsigned to signed: Dst is guaranteed to contain source only if its range is
+// larger.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED>
+{
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value > MaxExponent<Src>::value ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Signed to unsigned: Dst cannot be statically determined to contain Src.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED>
+{
+ static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// This class wraps the range constraints as separate booleans so the compiler
+// can identify constants and eliminate unused code paths.
+class RangeCheck
+{
+ public:
+ constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
+ : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound)
+ {}
+ constexpr RangeCheck() : is_underflow_(0), is_overflow_(0) {}
+ constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
+ constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
+ constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
+ constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
+ constexpr bool operator==(const RangeCheck rhs) const
+ {
+ return is_underflow_ == rhs.is_underflow_ && is_overflow_ == rhs.is_overflow_;
+ }
+ constexpr bool operator!=(const RangeCheck rhs) const { return !(*this == rhs); }
+
+ private:
+ // Do not change the order of these member variables. The integral conversion
+ // optimization depends on this exact order.
+ const bool is_underflow_;
+ const bool is_overflow_;
+};
+
+// The following helper template addresses a corner case in range checks for
+// conversion from a floating-point type to an integral type of smaller range
+// but larger precision (e.g. float -> unsigned). The problem is as follows:
+// 1. Integral maximum is always one less than a power of two, so it must be
+// truncated to fit the mantissa of the floating point. The direction of
+// rounding is implementation defined, but by default it's always IEEE
+// floats, which round to nearest and thus result in a value of larger
+// magnitude than the integral value.
+// Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
+// // is 4294967295u.
+// 2. If the floating point value is equal to the promoted integral maximum
+// value, a range check will erroneously pass.
+// Example: (4294967296f <= 4294967295u) // This is true due to a precision
+// // loss in rounding up to float.
+// 3. When the floating point value is then converted to an integral, the
+// resulting value is out of range for the target integral type and
+// thus is implementation defined.
+//     Example: unsigned u = (float)UINT_MAX; // u will typically overflow to 0.
+// To fix this bug we manually truncate the maximum value when the destination
+// type is an integral of larger precision than the source floating-point type,
+// such that the resulting maximum is represented exactly as a floating point.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct NarrowingRange
+{
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = typename std::numeric_limits<Dst>;
+
+ // Computes the mask required to make an accurate comparison between types.
+ static const int kShift =
+ (MaxExponent<Src>::value > MaxExponent<Dst>::value && SrcLimits::digits < DstLimits::digits)
+ ? (DstLimits::digits - SrcLimits::digits)
+ : 0;
+    // Masks out the integer bits that are beyond the precision of the
+    // intermediate type used for comparison.
+    template <typename T, typename std::enable_if<std::is_integral<T>::value>::type * = nullptr>
+    static constexpr T Adjust(T value)
+ {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift < DstLimits::digits, "");
+ return static_cast<T>(ConditionalNegate(SafeUnsignedAbs(value) & ~((T(1) << kShift) - T(1)),
+ IsValueNegative(value)));
+ }
+
+ template <typename T,
+ typename std::enable_if<std::is_floating_point<T>::value>::type * = nullptr>
+ static constexpr T Adjust(T value)
+ {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift == 0, "");
+ return value;
+ }
+
+ static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
+ static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
+};
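
A minimal sketch of the rounding corner case that NarrowingRange exists to guard against, assuming a typical IEEE-754 float:

#include <cstdint>
#include <iostream>
#include <limits>

int main()
{
    // UINT32_MAX is not representable in a float mantissa, so the conversion
    // rounds up to 4294967296.0f.
    const float f = static_cast<float>(std::numeric_limits<uint32_t>::max());
    // A naive range check passes, because the integer bound is promoted to the
    // same rounded float value before the comparison...
    std::cout << (f <= std::numeric_limits<uint32_t>::max()) << "\n";  // 1
    // ...yet converting f back to uint32_t would be out of range, which is why
    // NarrowingRange::max() truncates the comparison bound instead.
    return 0;
}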
+
+template <
+ typename Dst,
+ typename Src,
+ template <typename>
+ class Bounds,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ NumericRangeRepresentation DstRange = StaticDstRangeRelationToSrcRange<Dst, Src>::value>
+struct DstRangeRelationToSrcRangeImpl;
+
+// The following templates are for ranges that must be verified at runtime. We
+// split it into checks based on signedness to avoid confusing casts and
+// compiler warnings on signed and unsigned comparisons.
+
+// Same sign narrowing: The range is contained for normal limits.
+template <typename Dst,
+ typename Src,
+ template <typename>
+ class Bounds,
+ IntegerRepresentation DstSign,
+ IntegerRepresentation SrcSign>
+struct DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds, DstSign, SrcSign, NUMERIC_RANGE_CONTAINED>
+{
+ static constexpr RangeCheck Check(Src value)
+ {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
+ static_cast<Dst>(value) >= DstLimits::lowest(),
+ static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
+ static_cast<Dst>(value) <= DstLimits::max());
+ }
+};
+
+// Signed to signed narrowing: Both the upper and lower boundaries may be
+// exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ Bounds,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_SIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED>
+{
+ static constexpr RangeCheck Check(Src value)
+ {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
+ }
+};
+
+// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
+// standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ Bounds,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED>
+{
+ static constexpr RangeCheck Check(Src value)
+ {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
+ value <= DstLimits::max());
+ }
+};
+
+// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ Bounds,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED>
+{
+ static constexpr RangeCheck Check(Src value)
+ {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ return RangeCheck(
+ DstLimits::lowest() <= Dst(0) ||
+ static_cast<Promotion>(value) >= static_cast<Promotion>(DstLimits::lowest()),
+ static_cast<Promotion>(value) <= static_cast<Promotion>(DstLimits::max()));
+ }
+};
+
+// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
+// and any negative value exceeds the lower boundary for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ Bounds,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED>
+{
+ static constexpr RangeCheck Check(Src value)
+ {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ bool ge_zero = false;
+ // Converting floating-point to integer will discard fractional part, so
+ // values in (-1.0, -0.0) will truncate to 0 and fit in Dst.
+ if (std::is_floating_point<Src>::value)
+ {
+ ge_zero = value > Src(-1);
+ }
+ else
+ {
+ ge_zero = value >= Src(0);
+ }
+ return RangeCheck(
+ ge_zero && (DstLimits::lowest() == 0 || static_cast<Dst>(value) >= DstLimits::lowest()),
+ static_cast<Promotion>(SrcLimits::max()) <= static_cast<Promotion>(DstLimits::max()) ||
+ static_cast<Promotion>(value) <= static_cast<Promotion>(DstLimits::max()));
+ }
+};
+
+// Simple wrapper for statically checking if a type's range is contained.
+template <typename Dst, typename Src>
+struct IsTypeInRangeForNumericType
+{
+ static const bool value =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value == NUMERIC_RANGE_CONTAINED;
+};
+
+template <typename Dst, template <typename> class Bounds = std::numeric_limits, typename Src>
+constexpr RangeCheck DstRangeRelationToSrcRange(Src value)
+{
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+ static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
+ return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
+}
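
A small usage sketch for DstRangeRelationToSrcRange, under the same include-path assumption as above: it classifies a value against the destination range before a narrowing cast.

#include <cstdint>
#include <iostream>

#include "anglebase/numerics/safe_conversions_impl.h"

int main()
{
    using namespace angle::base::internal;
    // Classify an int value against the uint8_t range before narrowing.
    const RangeCheck bad = DstRangeRelationToSrcRange<uint8_t>(300);
    std::cout << bad.IsValid() << " " << bad.IsOverflow() << "\n";  // 0 1
    const RangeCheck ok = DstRangeRelationToSrcRange<uint8_t>(42);
    std::cout << ok.IsValid() << "\n";  // 1
    return 0;
}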
+
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForDigitsAndSign;
+
+#define INTEGER_FOR_DIGITS_AND_SIGN(I) \
+ template <> \
+ struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, std::is_signed<I>::value> \
+ { \
+ using type = I; \
+ }
+
+INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
+#undef INTEGER_FOR_DIGITS_AND_SIGN
+
+// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
+// support 128-bit math, then the ArithmeticPromotion template below will need
+// to be updated (or more likely replaced with a decltype expression).
+static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
+ "Max integer size not supported for this toolchain.");
+
+template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
+struct TwiceWiderInteger
+{
+ using type =
+ typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2, IsSigned>::type;
+};
+
+enum ArithmeticPromotionCategory
+{
+ LEFT_PROMOTION, // Use the type of the left-hand argument.
+ RIGHT_PROMOTION // Use the type of the right-hand argument.
+};
+
+// Determines the type that can represent the largest positive value.
+template <typename Lhs,
+ typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value) ? LEFT_PROMOTION
+ : RIGHT_PROMOTION>
+struct MaxExponentPromotion;
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION>
+{
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION>
+{
+ using type = Rhs;
+};
+
+// Determines the type that can represent the lowest arithmetic value.
+template <typename Lhs,
+ typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ std::is_signed<Lhs>::value
+ ? (std::is_signed<Rhs>::value
+ ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value ? LEFT_PROMOTION
+ : RIGHT_PROMOTION)
+ : LEFT_PROMOTION)
+ : (std::is_signed<Rhs>::value
+ ? RIGHT_PROMOTION
+ : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value ? LEFT_PROMOTION
+ : RIGHT_PROMOTION))>
+struct LowestValuePromotion;
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION>
+{
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION>
+{
+ using type = Rhs;
+};
+
+// Determines the type that is best able to represent an arithmetic result.
+template <typename Lhs,
+ typename Rhs = Lhs,
+ bool is_intmax_type =
+ std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value
+ &&IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::value ==
+ IntegerBitsPlusSign<intmax_t>::value,
+ bool is_max_exponent =
+ StaticDstRangeRelationToSrcRange<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ Lhs>::value ==
+ NUMERIC_RANGE_CONTAINED
+ &&StaticDstRangeRelationToSrcRange<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ Rhs>::value == NUMERIC_RANGE_CONTAINED>
+struct BigEnoughPromotion;
+
+// The side with the max exponent is big enough.
+template <typename Lhs, typename Rhs, bool is_intmax_type>
+struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true>
+{
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = true;
+};
+
+// We can use a twice wider type to fit.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, false, false>
+{
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value || std::is_signed<Rhs>::value>::type;
+ static const bool is_contained = true;
+};
+
+// No type is large enough.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, true, false>
+{
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. So, for an integer we
+// care if the destination type preserves the sign and is twice the width of
+// the source.
+template <typename T, typename Lhs, typename Rhs = Lhs>
+struct IsIntegerArithmeticSafe
+{
+ static const bool value =
+ !std::is_floating_point<T>::value && !std::is_floating_point<Lhs>::value &&
+ !std::is_floating_point<Rhs>::value &&
+ std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
+ std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
+};
+
+// Promotes to a type that can represent any possible result of a binary
+// arithmetic operation with the source types.
+template <typename Lhs,
+ typename Rhs,
+ bool is_promotion_possible = IsIntegerArithmeticSafe<
+ typename std::conditional<std::is_signed<Lhs>::value || std::is_signed<Rhs>::value,
+ intmax_t,
+ uintmax_t>::type,
+ typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
+struct FastIntegerArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, true>
+{
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value || std::is_signed<Rhs>::value>::type;
+ static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
+ static const bool is_contained = true;
+};
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, false>
+{
+ using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
+
+// Extracts the underlying type from an enum.
+template <typename T, bool is_enum = std::is_enum<T>::value>
+struct ArithmeticOrUnderlyingEnum;
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, true>
+{
+ using type = typename std::underlying_type<T>::type;
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, false>
+{
+ using type = T;
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+// The following are helper templates used in the CheckedNumeric class.
+template <typename T>
+class CheckedNumeric;
+
+template <typename T>
+class ClampedNumeric;
+
+template <typename T>
+class StrictNumeric;
+
+// Used to treat CheckedNumeric and arithmetic underlying types the same.
+template <typename T>
+struct UnderlyingType
+{
+ using type = typename ArithmeticOrUnderlyingEnum<T>::type;
+ static const bool is_numeric = std::is_arithmetic<type>::value;
+ static const bool is_checked = false;
+ static const bool is_clamped = false;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<CheckedNumeric<T>>
+{
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = true;
+ static const bool is_clamped = false;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<ClampedNumeric<T>>
+{
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = false;
+ static const bool is_clamped = true;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<StrictNumeric<T>>
+{
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = false;
+ static const bool is_clamped = false;
+ static const bool is_strict = true;
+};
+
+template <typename L, typename R>
+struct IsCheckedOp
+{
+ static const bool value = UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsClampedOp
+{
+ static const bool value = UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped) &&
+ !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsStrictOp
+{
+ static const bool value = UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict) &&
+ !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked) &&
+ !(UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped);
+};
+
+// as_signed<> returns the supplied integral value (or integral castable
+// Numeric template) cast as a signed integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_signed<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_signed<typename base::internal::UnderlyingType<Src>::type>::type
+as_signed(const Src value)
+{
+ static_assert(std::is_integral<decltype(as_signed(value))>::value,
+ "Argument must be a signed or unsigned integer type.");
+ return static_cast<decltype(as_signed(value))>(value);
+}
+
+// as_unsigned<> returns the supplied integral value (or integral castable
+// Numeric template) cast as an unsigned integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_unsigned<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_unsigned<typename base::internal::UnderlyingType<Src>::type>::type
+as_unsigned(const Src value)
+{
+ static_assert(std::is_integral<decltype(as_unsigned(value))>::value,
+ "Argument must be a signed or unsigned integer type.");
+ return static_cast<decltype(as_unsigned(value))>(value);
+}
+
+template <typename L, typename R>
+constexpr bool IsLessImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range)
+{
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) < static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLess
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsLessOrEqualImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range)
+{
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) <= static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLessOrEqual
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range)
+{
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) > static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreater
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterOrEqualImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range)
+{
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) >= static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreaterOrEqual
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+struct IsEqual
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return DstRangeRelationToSrcRange<R>(lhs) == DstRangeRelationToSrcRange<L>(rhs) &&
+ static_cast<decltype(lhs + rhs)>(lhs) == static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+template <typename L, typename R>
+struct IsNotEqual
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs)
+ {
+ return DstRangeRelationToSrcRange<R>(lhs) != DstRangeRelationToSrcRange<L>(rhs) ||
+ static_cast<decltype(lhs + rhs)>(lhs) != static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+// These perform the actual math operations on the CheckedNumerics.
+// Binary arithmetic operations.
+template <template <typename, typename> class C, typename L, typename R>
+constexpr bool SafeCompare(const L lhs, const R rhs)
+{
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ using Promotion = BigEnoughPromotion<L, R>;
+ using BigType = typename Promotion::type;
+ return Promotion::is_contained
+ // Force to a larger type for speed if both are contained.
+ ? C<BigType, BigType>::Test(static_cast<BigType>(static_cast<L>(lhs)),
+ static_cast<BigType>(static_cast<R>(rhs)))
+ // Let the template functions figure it out for mixed types.
+ : C<L, R>::Test(lhs, rhs);
+}
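
A short sketch of what SafeCompare buys over the built-in operators for mixed-sign operands, again assuming the header is reachable on the include path:

#include <iostream>

#include "anglebase/numerics/safe_conversions_impl.h"

int main()
{
    using namespace angle::base::internal;
    const unsigned int one = 1u;
    // The built-in comparison promotes -1 to a huge unsigned value.
    std::cout << (-1 < one) << "\n";                    // 0
    // SafeCompare promotes both sides to a big-enough signed type first.
    std::cout << SafeCompare<IsLess>(-1, one) << "\n";  // 1
    return 0;
}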
+
+template <typename Dst, typename Src>
+constexpr bool IsMaxInRangeForNumericType()
+{
+ return IsGreaterOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::max(),
+ std::numeric_limits<Src>::max());
+}
+
+template <typename Dst, typename Src>
+constexpr bool IsMinInRangeForNumericType()
+{
+ return IsLessOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::lowest(),
+ std::numeric_limits<Src>::lowest());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMax()
+{
+ return !IsMaxInRangeForNumericType<Dst, Src>() ? Dst(std::numeric_limits<Dst>::max())
+ : Dst(std::numeric_limits<Src>::max());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMin()
+{
+ return !IsMinInRangeForNumericType<Dst, Src>() ? Dst(std::numeric_limits<Dst>::lowest())
+ : Dst(std::numeric_limits<Src>::lowest());
+}
+
+// This is a wrapper that returns the max or min for a supplied type.
+// If the argument is false, the returned value is the maximum. If true, the
+// returned value is the minimum.
+template <typename Dst, typename Src = Dst>
+constexpr Dst CommonMaxOrMin(bool is_min)
+{
+ return is_min ? CommonMin<Dst, Src>() : CommonMax<Dst, Src>();
+}
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math.h
new file mode 100644
index 0000000000..a708cfddc2
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math.h
@@ -0,0 +1,12 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_H_
+#define BASE_NUMERICS_SAFE_MATH_H_
+
+#include "anglebase/numerics/checked_math.h"
+#include "anglebase/numerics/clamped_math.h"
+#include "anglebase/numerics/safe_conversions.h"
+
+#endif // BASE_NUMERICS_SAFE_MATH_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_arm_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_arm_impl.h
new file mode 100644
index 0000000000..3efdf2596d
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_arm_impl.h
@@ -0,0 +1,131 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
+
+#include <cassert>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions.h"
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+template <typename T, typename U>
+struct CheckedMulFastAsmOp
+{
+ static const bool is_supported = FastIntegerArithmeticPromotion<T, U>::is_contained;
+
+ // The following is much more efficient than the Clang and GCC builtins for
+ // performing overflow-checked multiplication when a twice wider type is
+ // available. The below compiles down to 2-3 instructions, depending on the
+ // width of the types in use.
+ // As an example, an int32_t multiply compiles to:
+ // smull r0, r1, r0, r1
+ // cmp r1, r1, asr #31
+ // And an int16_t multiply compiles to:
+ // smulbb r1, r1, r0
+ // asr r2, r1, #16
+ // cmp r2, r1, asr #15
+ template <typename V>
+ __attribute__((always_inline)) static bool Do(T x, U y, V *result)
+ {
+ using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ Promotion presult;
+
+ presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
+ if (!IsValueInRangeForNumericType<V>(presult))
+ return false;
+ *result = static_cast<V>(presult);
+ return true;
+ }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastAsmOp
+{
+ static const bool is_supported =
+ BigEnoughPromotion<T, U>::is_contained &&
+ IsTypeInRangeForNumericType<int32_t, typename BigEnoughPromotion<T, U>::type>::value;
+
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ // This will get promoted to an int, so let the compiler do whatever is
+ // clever and rely on the saturated cast to bounds check.
+ if (IsIntegerArithmeticSafe<int, T, U>::value)
+ return saturated_cast<V>(x + y);
+
+ int32_t result;
+ int32_t x_i32 = checked_cast<int32_t>(x);
+ int32_t y_i32 = checked_cast<int32_t>(y);
+
+ asm("qadd %[result], %[first], %[second]"
+ : [result] "=r"(result)
+ : [first] "r"(x_i32), [second] "r"(y_i32));
+ return saturated_cast<V>(result);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastAsmOp
+{
+ static const bool is_supported =
+ BigEnoughPromotion<T, U>::is_contained &&
+ IsTypeInRangeForNumericType<int32_t, typename BigEnoughPromotion<T, U>::type>::value;
+
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ // This will get promoted to an int, so let the compiler do whatever is
+ // clever and rely on the saturated cast to bounds check.
+ if (IsIntegerArithmeticSafe<int, T, U>::value)
+ return saturated_cast<V>(x - y);
+
+ int32_t result;
+ int32_t x_i32 = checked_cast<int32_t>(x);
+ int32_t y_i32 = checked_cast<int32_t>(y);
+
+ asm("qsub %[result], %[first], %[second]"
+ : [result] "=r"(result)
+ : [first] "r"(x_i32), [second] "r"(y_i32));
+ return saturated_cast<V>(result);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastAsmOp
+{
+ static const bool is_supported = CheckedMulFastAsmOp<T, U>::is_supported;
+
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ // Use the CheckedMulFastAsmOp for full-width 32-bit values, because
+ // it's fewer instructions than promoting and then saturating.
+ if (!IsIntegerArithmeticSafe<int32_t, T, U>::value &&
+ !IsIntegerArithmeticSafe<uint32_t, T, U>::value)
+ {
+ V result;
+ return CheckedMulFastAsmOp<T, U>::Do(x, y, &result)
+ ? result
+ : CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
+ }
+
+ assert((FastIntegerArithmeticPromotion<T, U>::is_contained));
+ using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ return saturated_cast<V>(static_cast<Promotion>(x) * static_cast<Promotion>(y));
+ }
+};
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_clang_gcc_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_clang_gcc_impl.h
new file mode 100644
index 0000000000..0f6a1e14d6
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_clang_gcc_impl.h
@@ -0,0 +1,182 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions.h"
+
+#if !defined(__native_client__) && (defined(__ARMEL__) || defined(__arch64__))
+# include "anglebase/numerics/safe_math_arm_impl.h"
+# define BASE_HAS_ASSEMBLER_SAFE_MATH (1)
+#else
+# define BASE_HAS_ASSEMBLER_SAFE_MATH (0)
+#endif
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+// These are the non-functioning boilerplate implementations of the optimized
+// safe math routines.
+#if !BASE_HAS_ASSEMBLER_SAFE_MATH
+template <typename T, typename U>
+struct CheckedMulFastAsmOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V *)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastAsmOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastAsmOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastAsmOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+#endif // BASE_HAS_ASSEMBLER_SAFE_MATH
+#undef BASE_HAS_ASSEMBLER_SAFE_MATH
+
+template <typename T, typename U>
+struct CheckedAddFastOp
+{
+ static const bool is_supported = true;
+ template <typename V>
+ __attribute__((always_inline)) static constexpr bool Do(T x, U y, V *result)
+ {
+ return !__builtin_add_overflow(x, y, result);
+ }
+};
+
+template <typename T, typename U>
+struct CheckedSubFastOp
+{
+ static const bool is_supported = true;
+ template <typename V>
+ __attribute__((always_inline)) static constexpr bool Do(T x, U y, V *result)
+ {
+ return !__builtin_sub_overflow(x, y, result);
+ }
+};
+
+template <typename T, typename U>
+struct CheckedMulFastOp
+{
+#if defined(__clang__)
+ // TODO(jschuh): Get the Clang runtime library issues sorted out so we can
+ // support full-width, mixed-sign multiply builtins.
+ // https://crbug.com/613003
+ // We can support intptr_t, uintptr_t, or a smaller common type.
+ static const bool is_supported = (IsTypeInRangeForNumericType<intptr_t, T>::value &&
+ IsTypeInRangeForNumericType<intptr_t, U>::value) ||
+ (IsTypeInRangeForNumericType<uintptr_t, T>::value &&
+ IsTypeInRangeForNumericType<uintptr_t, U>::value);
+#else
+ static const bool is_supported = true;
+#endif
+ template <typename V>
+ __attribute__((always_inline)) static constexpr bool Do(T x, U y, V *result)
+ {
+ return CheckedMulFastAsmOp<T, U>::is_supported ? CheckedMulFastAsmOp<T, U>::Do(x, y, result)
+ : !__builtin_mul_overflow(x, y, result);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastOp
+{
+ static const bool is_supported = ClampedAddFastAsmOp<T, U>::is_supported;
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ return ClampedAddFastAsmOp<T, U>::template Do<V>(x, y);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastOp
+{
+ static const bool is_supported = ClampedSubFastAsmOp<T, U>::is_supported;
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ return ClampedSubFastAsmOp<T, U>::template Do<V>(x, y);
+ }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastOp
+{
+ static const bool is_supported = ClampedMulFastAsmOp<T, U>::is_supported;
+ template <typename V>
+ __attribute__((always_inline)) static V Do(T x, U y)
+ {
+ return ClampedMulFastAsmOp<T, U>::template Do<V>(x, y);
+ }
+};
+
+template <typename T>
+struct ClampedNegFastOp
+{
+ static const bool is_supported = std::is_signed<T>::value;
+ __attribute__((always_inline)) static T Do(T value)
+ {
+ // Use this when there is no assembler path available.
+ if (!ClampedSubFastAsmOp<T, T>::is_supported)
+ {
+ T result;
+ return !__builtin_sub_overflow(T(0), value, &result) ? result
+ : std::numeric_limits<T>::max();
+ }
+
+        // Fall back to the normal subtraction path.
+ return ClampedSubFastOp<T, T>::template Do<T>(T(0), value);
+ }
+};
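
A standalone sketch of the clamping idea used in the fallback branch above, assuming a GCC or Clang toolchain that provides __builtin_sub_overflow; the ClampedNegate helper below is purely illustrative.

#include <cstdint>
#include <iostream>
#include <limits>

// Negate a signed value, clamping instead of overflowing (mirrors the
// builtin-based fallback of ClampedNegFastOp above).
int32_t ClampedNegate(int32_t value)
{
    int32_t result;
    return !__builtin_sub_overflow(int32_t{0}, value, &result)
               ? result
               : std::numeric_limits<int32_t>::max();
}

int main()
{
    std::cout << ClampedNegate(5) << "\n";                                    // -5
    std::cout << ClampedNegate(std::numeric_limits<int32_t>::min()) << "\n";  // 2147483647
    return 0;
}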
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_shared_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_shared_impl.h
new file mode 100644
index 0000000000..6f46f12e5f
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_shared_impl.h
@@ -0,0 +1,227 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <cassert>
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "anglebase/numerics/safe_conversions.h"
+
+#if defined(OS_ASMJS)
+// Optimized safe math instructions are incompatible with asmjs.
+# define BASE_HAS_OPTIMIZED_SAFE_MATH (0)
+// Where available use builtin math overflow support on Clang and GCC.
+#elif !defined(__native_client__) && \
+ ((defined(__clang__) && \
+ ((__clang_major__ > 3) || (__clang_major__ == 3 && __clang_minor__ >= 4))) || \
+ (defined(__GNUC__) && __GNUC__ >= 5))
+# include "anglebase/numerics/safe_math_clang_gcc_impl.h"
+# define BASE_HAS_OPTIMIZED_SAFE_MATH (1)
+#else
+# define BASE_HAS_OPTIMIZED_SAFE_MATH (0)
+#endif
+
+namespace angle
+{
+namespace base
+{
+namespace internal
+{
+
+// These are the non-functioning boilerplate implementations of the optimized
+// safe math routines.
+#if !BASE_HAS_OPTIMIZED_SAFE_MATH
+template <typename T, typename U>
+struct CheckedAddFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V *)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct CheckedSubFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V *)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct CheckedMulFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr bool Do(T, U, V *)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastOp
+{
+ static const bool is_supported = false;
+ template <typename V>
+ static constexpr V Do(T, U)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<V>();
+ }
+};
+
+template <typename T>
+struct ClampedNegFastOp
+{
+ static const bool is_supported = false;
+ static constexpr T Do(T)
+ {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<T>();
+ }
+};
+#endif // BASE_HAS_OPTIMIZED_SAFE_MATH
+#undef BASE_HAS_OPTIMIZED_SAFE_MATH
+
+// This is used for UnsignedAbs, where we need to support floating-point
+// template instantiations even though we don't actually support the operations.
+// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
+// so the float versions will not compile.
+template <typename Numeric,
+ bool IsInteger = std::is_integral<Numeric>::value,
+ bool IsFloat = std::is_floating_point<Numeric>::value>
+struct UnsignedOrFloatForSize;
+
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, true, false>
+{
+ using type = typename std::make_unsigned<Numeric>::type;
+};
+
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, false, true>
+{
+ using type = Numeric;
+};
+
+// Wrap the unary operations to allow SFINAE when instantiating integrals versus
+// floating points. These don't perform any overflow checking. Rather, they
+// exhibit well-defined overflow semantics and rely on the caller to detect
+// if an overflow occurred.
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value>::type * = nullptr>
+constexpr T NegateWrapper(T value)
+{
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ // This will compile to a NEG on Intel, and is normal negation on ARM.
+ return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
+}
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value>::type * = nullptr>
+constexpr T NegateWrapper(T value)
+{
+ return -value;
+}
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value>::type * = nullptr>
+constexpr typename std::make_unsigned<T>::type InvertWrapper(T value)
+{
+ return ~value;
+}
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value>::type * = nullptr>
+constexpr T AbsWrapper(T value)
+{
+ return static_cast<T>(SafeUnsignedAbs(value));
+}
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value>::type * = nullptr>
+constexpr T AbsWrapper(T value)
+{
+ return value < 0 ? -value : value;
+}
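
A minimal sketch of the well-defined wrapping NegateWrapper provides on the most negative value, assuming "anglebase/numerics/safe_math_shared_impl.h" is reachable on the include path and a two's-complement target:

#include <cstdint>
#include <iostream>
#include <limits>

#include "anglebase/numerics/safe_math_shared_impl.h"

int main()
{
    using angle::base::internal::NegateWrapper;
    const int32_t most_negative = std::numeric_limits<int32_t>::min();
    // Plain "-most_negative" is undefined behavior; NegateWrapper wraps
    // through unsigned arithmetic and hands overflow detection back to the
    // caller (the Checked/Clamped machinery).
    std::cout << NegateWrapper(most_negative) << "\n";  // -2147483648
    return 0;
}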
+
+template <template <typename, typename, typename> class M, typename L, typename R>
+struct MathWrapper
+{
+ using math = M<typename UnderlyingType<L>::type, typename UnderlyingType<R>::type, void>;
+ using type = typename math::result_type;
+};
+
+// The following macros are just boilerplate for the standard arithmetic
+// operator overloads and variadic function templates. A macro isn't the nicest
+// solution, but it beats rewriting these over and over again.
+#define BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME) \
+ template <typename L, typename R, typename... Args> \
+ constexpr auto CL_ABBR##OP_NAME(const L lhs, const R rhs, const Args... args) \
+ { \
+ return CL_ABBR##MathOp<CLASS##OP_NAME##Op, L, R, Args...>(lhs, rhs, args...); \
+ }
+
+#define BASE_NUMERIC_ARITHMETIC_OPERATORS(CLASS, CL_ABBR, OP_NAME, OP, CMP_OP) \
+ /* Binary arithmetic operator for all CLASS##Numeric operations. */ \
+ template <typename L, typename R, \
+ typename std::enable_if<Is##CLASS##Op<L, R>::value>::type * = nullptr> \
+ constexpr CLASS##Numeric<typename MathWrapper<CLASS##OP_NAME##Op, L, R>::type> operator OP( \
+ const L lhs, const R rhs) \
+ { \
+ return decltype(lhs OP rhs)::template MathOp<CLASS##OP_NAME##Op>(lhs, rhs); \
+ } \
+ /* Assignment arithmetic operator implementation from CLASS##Numeric. */ \
+ template <typename L> \
+ template <typename R> \
+ constexpr CLASS##Numeric<L> &CLASS##Numeric<L>::operator CMP_OP(const R rhs) \
+ { \
+ return MathOp<CLASS##OP_NAME##Op>(rhs); \
+ } \
+ /* Variadic arithmetic functions that return CLASS##Numeric. */ \
+ BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)
+
+} // namespace internal
+} // namespace base
+} // namespace angle
+
+#endif // BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.cc b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.cc
new file mode 100644
index 0000000000..cb88ba06e1
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.cc
@@ -0,0 +1,245 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "anglebase/sha1.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "anglebase/sys_byteorder.h"
+
+namespace angle
+{
+
+namespace base
+{
+
+// Implementation of SHA-1. Only handles data in byte-sized blocks,
+// which simplifies the code a fair bit.
+
+// Identifier names follow notation in FIPS PUB 180-3, where you'll
+// also find a description of the algorithm:
+// http://csrc.nist.gov/publications/fips/fips180-3/fips180-3_final.pdf
+
+// Usage example:
+//
+// SecureHashAlgorithm sha;
+// while(there is data to hash)
+// sha.Update(moredata, size of data);
+// sha.Final();
+// memcpy(somewhere, sha.Digest(), 20);
+//
+// to reuse the instance of sha, call sha.Init();
+
+// TODO(jhawkins): Replace this implementation with a per-platform
+// implementation using each platform's crypto library. See
+// http://crbug.com/47218
+
+class SecureHashAlgorithm
+{
+ public:
+ SecureHashAlgorithm() { Init(); }
+
+ static const int kDigestSizeBytes;
+
+ void Init();
+ void Update(const void *data, size_t nbytes);
+ void Final();
+
+ // 20 bytes of message digest.
+ const unsigned char *Digest() const { return reinterpret_cast<const unsigned char *>(H); }
+
+ private:
+ void Pad();
+ void Process();
+
+ uint32_t A, B, C, D, E;
+
+ uint32_t H[5];
+
+ union {
+ uint32_t W[80];
+ uint8_t M[64];
+ };
+
+ uint32_t cursor;
+ uint64_t l;
+};
+
+static inline uint32_t f(uint32_t t, uint32_t B, uint32_t C, uint32_t D)
+{
+ if (t < 20)
+ {
+ return (B & C) | ((~B) & D);
+ }
+ else if (t < 40)
+ {
+ return B ^ C ^ D;
+ }
+ else if (t < 60)
+ {
+ return (B & C) | (B & D) | (C & D);
+ }
+ else
+ {
+ return B ^ C ^ D;
+ }
+}
+
+static inline uint32_t S(uint32_t n, uint32_t X)
+{
+ return (X << n) | (X >> (32 - n));
+}
+
+static inline uint32_t K(uint32_t t)
+{
+ if (t < 20)
+ {
+ return 0x5a827999;
+ }
+ else if (t < 40)
+ {
+ return 0x6ed9eba1;
+ }
+ else if (t < 60)
+ {
+ return 0x8f1bbcdc;
+ }
+ else
+ {
+ return 0xca62c1d6;
+ }
+}
+
+const int SecureHashAlgorithm::kDigestSizeBytes = 20;
+
+void SecureHashAlgorithm::Init()
+{
+ A = 0;
+ B = 0;
+ C = 0;
+ D = 0;
+ E = 0;
+ cursor = 0;
+ l = 0;
+ H[0] = 0x67452301;
+ H[1] = 0xefcdab89;
+ H[2] = 0x98badcfe;
+ H[3] = 0x10325476;
+ H[4] = 0xc3d2e1f0;
+}
+
+void SecureHashAlgorithm::Final()
+{
+ Pad();
+ Process();
+
+ for (int t = 0; t < 5; ++t)
+ H[t] = ByteSwap(H[t]);
+}
+
+void SecureHashAlgorithm::Update(const void *data, size_t nbytes)
+{
+ const uint8_t *d = reinterpret_cast<const uint8_t *>(data);
+ while (nbytes--)
+ {
+ M[cursor++] = *d++;
+ if (cursor >= 64)
+ Process();
+ l += 8;
+ }
+}
+
+void SecureHashAlgorithm::Pad()
+{
+ M[cursor++] = 0x80;
+
+ if (cursor > 64 - 8)
+ {
+ // pad out to next block
+ while (cursor < 64)
+ M[cursor++] = 0;
+
+ Process();
+ }
+
+ while (cursor < 64 - 8)
+ M[cursor++] = 0;
+
+ M[cursor++] = (l >> 56) & 0xff;
+ M[cursor++] = (l >> 48) & 0xff;
+ M[cursor++] = (l >> 40) & 0xff;
+ M[cursor++] = (l >> 32) & 0xff;
+ M[cursor++] = (l >> 24) & 0xff;
+ M[cursor++] = (l >> 16) & 0xff;
+ M[cursor++] = (l >> 8) & 0xff;
+ M[cursor++] = l & 0xff;
+}
+
+void SecureHashAlgorithm::Process()
+{
+ uint32_t t;
+
+ // Each a...e corresponds to a section in the FIPS 180-3 algorithm.
+
+ // a.
+ //
+ // W and M are in a union, so no need to memcpy.
+ // memcpy(W, M, sizeof(M));
+ for (t = 0; t < 16; ++t)
+ W[t] = ByteSwap(W[t]);
+
+ // b.
+ for (t = 16; t < 80; ++t)
+ W[t] = S(1, W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16]);
+
+ // c.
+ A = H[0];
+ B = H[1];
+ C = H[2];
+ D = H[3];
+ E = H[4];
+
+ // d.
+ for (t = 0; t < 80; ++t)
+ {
+ uint32_t TEMP = S(5, A) + f(t, B, C, D) + E + W[t] + K(t);
+ E = D;
+ D = C;
+ C = S(30, B);
+ B = A;
+ A = TEMP;
+ }
+
+ // e.
+ H[0] += A;
+ H[1] += B;
+ H[2] += C;
+ H[3] += D;
+ H[4] += E;
+
+ cursor = 0;
+}
+
+std::string SHA1HashString(const std::string &str)
+{
+ char hash[SecureHashAlgorithm::kDigestSizeBytes];
+ SHA1HashBytes(reinterpret_cast<const unsigned char *>(str.c_str()), str.length(),
+ reinterpret_cast<unsigned char *>(hash));
+ return std::string(hash, SecureHashAlgorithm::kDigestSizeBytes);
+}
+
+void SHA1HashBytes(const unsigned char *data, size_t len, unsigned char *hash)
+{
+ SecureHashAlgorithm sha;
+ sha.Update(data, len);
+ sha.Final();
+
+ memcpy(hash, sha.Digest(), SecureHashAlgorithm::kDigestSizeBytes);
+}
+
+} // namespace base
+
+} // namespace angle
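
A usage sketch for the public helpers, assuming the header is reachable as "anglebase/sha1.h"; the expected output is the well-known SHA-1 digest of "hello world".

#include <cstdio>
#include <string>

#include "anglebase/sha1.h"

int main()
{
    const std::string digest = angle::base::SHA1HashString("hello world");
    // Print the 20-byte digest as 40 hex characters.
    for (unsigned char c : digest)
        std::printf("%02x", c);
    std::printf("\n");  // 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
    return 0;
}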
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.h
new file mode 100644
index 0000000000..a60908814f
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef ANGLEBASE_SHA1_H_
+#define ANGLEBASE_SHA1_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "anglebase/base_export.h"
+
+namespace angle
+{
+
+namespace base
+{
+
+// These functions perform SHA-1 operations.
+
+static const size_t kSHA1Length = 20; // Length in bytes of a SHA-1 hash.
+
+// Computes the SHA-1 hash of the input string |str| and returns the full
+// hash.
+ANGLEBASE_EXPORT std::string SHA1HashString(const std::string &str);
+
+// Computes the SHA-1 hash of the |len| bytes in |data| and puts the hash
+// in |hash|. |hash| must be kSHA1Length bytes long.
+ANGLEBASE_EXPORT void SHA1HashBytes(const unsigned char *data, size_t len, unsigned char *hash);
+
+} // namespace base
+
+} // namespace angle
+
+#endif // ANGLEBASE_SHA1_H_
diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/sys_byteorder.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/sys_byteorder.h
new file mode 100644
index 0000000000..70d9c275e6
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/sys_byteorder.h
@@ -0,0 +1,49 @@
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// sys_byteorder.h: Compatibility hacks for importing Chromium's base/SHA1.
+
+#ifndef ANGLEBASE_SYS_BYTEORDER_H_
+#define ANGLEBASE_SYS_BYTEORDER_H_
+
+namespace angle
+{
+
+namespace base
+{
+
+// Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
+inline uint16_t ByteSwap(uint16_t x)
+{
+#if defined(_MSC_VER)
+ return _byteswap_ushort(x);
+#else
+ return __builtin_bswap16(x);
+#endif
+}
+
+inline uint32_t ByteSwap(uint32_t x)
+{
+#if defined(_MSC_VER)
+ return _byteswap_ulong(x);
+#else
+ return __builtin_bswap32(x);
+#endif
+}
+
+inline uint64_t ByteSwap(uint64_t x)
+{
+#if defined(_MSC_VER)
+ return _byteswap_uint64(x);
+#else
+ return __builtin_bswap64(x);
+#endif
+}
+
+} // namespace base
+
+} // namespace angle
+
+#endif // ANGLEBASE_SYS_BYTEORDER_H_
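
A quick sketch exercising ByteSwap, assuming the header is reachable as "anglebase/sys_byteorder.h":

#include <cstdint>
#include <cstdio>

#include "anglebase/sys_byteorder.h"

int main()
{
    const uint32_t x = 0x11223344u;
    // Reverses the byte order of the 32-bit value.
    std::printf("%08x\n", angle::base::ByteSwap(x));  // 44332211
    return 0;
}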
diff --git a/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.cpp b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.cpp
new file mode 100644
index 0000000000..379e5ce3d5
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.cpp
@@ -0,0 +1,339 @@
+/*-----------------------------------------------------------------------------
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain.
+ *
+ * This implementation was written by Shane Day, and is also public domain.
+ *
+ * This is a portable ANSI C implementation of MurmurHash3_x86_32 (Murmur3A)
+ * with support for progressive processing.
+ */
+
+/*-----------------------------------------------------------------------------
+
+If you want to understand the MurmurHash algorithm you would be much better
+off reading the original source. Just point your browser at:
+http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
+
+
+What does this version provide?
+
+1. Progressive data feeding. Useful when the entire payload to be hashed
+does not fit in memory or when the data is streamed through the application.
+Also useful when hashing a number of strings with a common prefix. A partial
+hash of a prefix string can be generated and reused for each suffix string.
+
+2. Portability. Plain old C so that it should compile on any old compiler.
+Neutral with respect to both CPU endianness and access alignment, while still
+avoiding inefficient code where CPU capabilities allow.
+
+3. Drop in. I personally like nice self contained public domain code, making it
+easy to pilfer without loads of refactoring to work properly in the existing
+application code & makefile structure and mucking around with licence files.
+Just copy PMurHash.h and PMurHash.c and you're ready to go.
+
+
+How does it work?
+
+We can only process entire 32 bit chunks of input, except for the very end
+that may be shorter. So along with the partial hash we need to give back to
+the caller a carry containing up to 3 bytes that we were unable to process.
+This carry also needs to record the number of bytes the carry holds. I use
+the low 2 bits as a count (0..3) and the carry bytes are shifted into the
+high byte in stream order.
+
+To handle endianness I simply use a macro that reads a uint32_t and define
+that macro to be a direct read on little endian machines, a read and swap
+on big endian machines, or a byte-by-byte read if the endianness is unknown.
+
+-----------------------------------------------------------------------------*/
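
An illustrative sketch (not the PMurHash code itself) of one way to pack up to three pending tail bytes plus a 2-bit count into a single carry word, following the scheme described above:

#include <cstdint>
#include <cstdio>

int main()
{
    uint32_t carry = 0;  // low 2 bits: byte count; pending bytes above them
    const uint8_t tail[3] = {0x11, 0x22, 0x33};
    for (int i = 0; i < 3; ++i)
    {
        const uint32_t count = carry & 3u;
        uint32_t bytes       = carry & ~3u;
        // The new byte enters at the top; earlier bytes shift toward the bottom,
        // so the first byte in stream order ends up lowest.
        bytes = (bytes >> 8) | (static_cast<uint32_t>(tail[i]) << 24);
        carry = (bytes & ~3u) | (count + 1u);
    }
    std::printf("%08x\n", carry);  // 33221103 with this packing
    return 0;
}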
+
+#include "PMurHash.h"
+#include <stdint.h>
+
+/* I used ugly type names in the header to avoid potential conflicts with
+ * application or system typedefs & defines. Since I'm not including any more
+ * headers below here I can rename these so that the code reads like C99 */
+#undef uint32_t
+#define uint32_t MH_UINT32
+#undef uint8_t
+#define uint8_t MH_UINT8
+
+/* MSVC warnings we choose to ignore */
+#if defined(_MSC_VER)
+# pragma warning(disable : 4127) /* conditional expression is constant */
+#endif
+
+/*-----------------------------------------------------------------------------
+ * Endianness, misalignment capabilities and util macros
+ *
+ * The following 3 macros are defined in this section. The other macros defined
+ * are only needed to help derive these 3.
+ *
+ * READ_UINT32(x) Read a little endian unsigned 32-bit int
+ * UNALIGNED_SAFE Defined if READ_UINT32 works on non-word boundaries
+ * ROTL32(x,r) Rotate x left by r bits
+ */
+
+/* Convention is to define __BYTE_ORDER == to one of these values */
+#if !defined(__BIG_ENDIAN)
+# define __BIG_ENDIAN 4321
+#endif
+#if !defined(__LITTLE_ENDIAN)
+# define __LITTLE_ENDIAN 1234
+#endif
+
+/* I386 */
+#if defined(_M_IX86) || defined(__i386__) || defined(__i386) || defined(i386)
+# define __BYTE_ORDER __LITTLE_ENDIAN
+# define UNALIGNED_SAFE
+#endif
+
+/* gcc 'may' define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ to 1 (Note the trailing __),
+ * or even _LITTLE_ENDIAN or _BIG_ENDIAN (Note the single _ prefix) */
+#if !defined(__BYTE_ORDER)
+# if defined(__LITTLE_ENDIAN__) && __LITTLE_ENDIAN__ == 1 || \
+ defined(_LITTLE_ENDIAN) && _LITTLE_ENDIAN == 1
+# define __BYTE_ORDER __LITTLE_ENDIAN
+# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__ == 1 || defined(_BIG_ENDIAN) && _BIG_ENDIAN == 1
+# define __BYTE_ORDER __BIG_ENDIAN
+# endif
+#endif
+
+/* gcc (usually) defines xEL/EB macros for ARM and MIPS endianness */
+#if !defined(__BYTE_ORDER)
+# if defined(__ARMEL__) || defined(__MIPSEL__)
+# define __BYTE_ORDER __LITTLE_ENDIAN
+# endif
+# if defined(__ARMEB__) || defined(__MIPSEB__)
+# define __BYTE_ORDER __BIG_ENDIAN
+# endif
+#endif
+
+/* Now find best way we can to READ_UINT32 */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+/* CPU endian matches murmurhash algorithm, so read 32-bit word directly */
+# define READ_UINT32(ptr) (*((uint32_t *)(ptr)))
+#elif __BYTE_ORDER == __BIG_ENDIAN
+/* TODO: Add additional cases below where a compiler provided bswap32 is available */
+# if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+# define READ_UINT32(ptr) (__builtin_bswap32(*((uint32_t *)(ptr))))
+# else
+/* Without a known fast bswap32 we're just as well off doing this */
+# define READ_UINT32(ptr) (ptr[0] | ptr[1] << 8 | ptr[2] << 16 | ptr[3] << 24)
+# define UNALIGNED_SAFE
+# endif
+#else
+/* Unknown endianness, so the last resort is to read individual bytes */
+# define READ_UINT32(ptr) (ptr[0] | ptr[1] << 8 | ptr[2] << 16 | ptr[3] << 24)
+
+/* Since we're not doing word-reads we can skip the messing about with realignment */
+# define UNALIGNED_SAFE
+#endif
+
+/* Find best way to ROTL32 */
+#if defined(_MSC_VER)
+# include <stdlib.h> /* Microsoft put _rotl declaration in here */
+# define ROTL32(x, r) _rotl(x, r)
+#else
+/* gcc recognises this code and generates a rotate instruction for CPUs with one */
+# define ROTL32(x, r) (((uint32_t)x << r) | ((uint32_t)x >> (32 - r)))
+#endif
+
+/*-----------------------------------------------------------------------------
+ * Core murmurhash algorithm macros */
+
+#define C1 (0xcc9e2d51)
+#define C2 (0x1b873593)
+
+/* This is the main processing body of the algorithm. It operates
+ * on each full 32-bits of input. */
+#define DOBLOCK(h1, k1) \
+ do \
+ { \
+ k1 *= C1; \
+ k1 = ROTL32(k1, 15); \
+ k1 *= C2; \
+ \
+ h1 ^= k1; \
+ h1 = ROTL32(h1, 13); \
+ h1 = h1 * 5 + 0xe6546b64; \
+ } while (0)
+
+/* Append unaligned bytes to carry, forcing hash churn if we have 4 bytes */
+/* cnt=bytes to process, h1=name of h1 var, c=carry, n=bytes in c, ptr/len=payload */
+#define DOBYTES(cnt, h1, c, n, ptr, len) \
+ do \
+ { \
+ int _i = cnt; \
+ while (_i--) \
+ { \
+ c = c >> 8 | *ptr++ << 24; \
+ n++; \
+ len--; \
+ if (n == 4) \
+ { \
+ DOBLOCK(h1, c); \
+ n = 0; \
+ } \
+ } \
+ } while (0)
+
+/*---------------------------------------------------------------------------*/
+
+namespace angle
+{
+/* Main hashing function. Initialise carry to 0 and h1 to 0 or an initial seed
+ * if wanted. Both ph1 and pcarry are required arguments. */
+void PMurHash32_Process(uint32_t *ph1, uint32_t *pcarry, const void *key, int len)
+{
+ uint32_t h1 = *ph1;
+ uint32_t c = *pcarry;
+
+ const uint8_t *ptr = (uint8_t *)key;
+ const uint8_t *end;
+
+ /* Extract carry count from low 2 bits of c value */
+ int n = c & 3;
+
+#if defined(UNALIGNED_SAFE)
+ /* This CPU handles unaligned word access */
+
+ /* Consume any carry bytes */
+ int i = (4 - n) & 3;
+ if (i && i <= len)
+ {
+ DOBYTES(i, h1, c, n, ptr, len);
+ }
+
+ /* Process 32-bit chunks */
+ end = ptr + len / 4 * 4;
+ for (; ptr < end; ptr += 4)
+ {
+ uint32_t k1 = READ_UINT32(ptr);
+ DOBLOCK(h1, k1);
+ }
+
+#else /*UNALIGNED_SAFE*/
+ /* This CPU does not handle unaligned word access */
+
+ /* Consume enough so that the next data byte is word aligned */
+ int i = -(intptr_t)ptr & 3;
+ if (i && i <= len)
+ {
+ DOBYTES(i, h1, c, n, ptr, len);
+ }
+
+ /* We're now aligned. Process in aligned blocks. Specialise for each possible carry count */
+ end = ptr + len / 4 * 4;
+ switch (n)
+ { /* how many bytes in c */
+ case 0: /* c=[----] w=[3210] b=[3210]=w c'=[----] */
+ for (; ptr < end; ptr += 4)
+ {
+ uint32_t k1 = READ_UINT32(ptr);
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 1: /* c=[0---] w=[4321] b=[3210]=c>>24|w<<8 c'=[4---] */
+ for (; ptr < end; ptr += 4)
+ {
+ uint32_t k1 = c >> 24;
+ c = READ_UINT32(ptr);
+ k1 |= c << 8;
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 2: /* c=[10--] w=[5432] b=[3210]=c>>16|w<<16 c'=[54--] */
+ for (; ptr < end; ptr += 4)
+ {
+ uint32_t k1 = c >> 16;
+ c = READ_UINT32(ptr);
+ k1 |= c << 16;
+ DOBLOCK(h1, k1);
+ }
+ break;
+ case 3: /* c=[210-] w=[6543] b=[3210]=c>>8|w<<24 c'=[654-] */
+ for (; ptr < end; ptr += 4)
+ {
+ uint32_t k1 = c >> 8;
+ c = READ_UINT32(ptr);
+ k1 |= c << 24;
+ DOBLOCK(h1, k1);
+ }
+ }
+#endif /*UNALIGNED_SAFE*/
+
+ /* Advance over whole 32-bit chunks, possibly leaving 1..3 bytes */
+ len -= len / 4 * 4;
+
+ /* Append any remaining bytes into carry */
+ DOBYTES(len, h1, c, n, ptr, len);
+
+ /* Copy out new running hash and carry */
+ *ph1 = h1;
+ *pcarry = (c & ~0xff) | n;
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Finalize a hash. To match the original Murmur3A the total_length must be provided */
+uint32_t PMurHash32_Result(uint32_t h, uint32_t carry, uint32_t total_length)
+{
+ uint32_t k1;
+ int n = carry & 3;
+ if (n)
+ {
+ k1 = carry >> (4 - n) * 8;
+ k1 *= C1;
+ k1 = ROTL32(k1, 15);
+ k1 *= C2;
+ h ^= k1;
+ }
+ h ^= total_length;
+
+ /* fmix */
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+
+ return h;
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Murmur3A-compatible all-at-once */
+uint32_t PMurHash32(uint32_t seed, const void *key, int len)
+{
+ uint32_t h1 = seed, carry = 0;
+ PMurHash32_Process(&h1, &carry, key, len);
+ return PMurHash32_Result(h1, carry, len);
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Provide an API suitable for smhasher */
+void PMurHash32_test(const void *key, int len, uint32_t seed, void *out)
+{
+ uint32_t h1 = seed, carry = 0;
+ const uint8_t *ptr = (uint8_t *)key;
+ const uint8_t *end = ptr + len;
+
+#if 0 /* Exercise the progressive processing */
+ while(ptr < end) {
+ //const uint8_t *mid = ptr + rand()%(end-ptr)+1;
+ const uint8_t *mid = ptr + (rand()&0xF);
+ mid = mid<end?mid:end;
+ PMurHash32_Process(&h1, &carry, ptr, mid-ptr);
+ ptr = mid;
+ }
+#else
+ PMurHash32_Process(&h1, &carry, ptr, (int)(end - ptr));
+#endif
+ h1 = PMurHash32_Result(h1, carry, len);
+ *(uint32_t *)out = h1;
+}
+} // namespace angle
+
+/*---------------------------------------------------------------------------*/
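A hedged sketch of the progressive API described in the header comment above: a prefix and a suffix are fed separately, and the result matches the one-shot PMurHash32 over the whole buffer (the buffer contents and split point are illustrative only):

#include <cassert>
#include <cstring>
#include "common/third_party/smhasher/src/PMurHash.h"  // path as added by this patch

int main()
{
    const char data[] = "common-prefix/suffix";
    const int len     = static_cast<int>(std::strlen(data));

    // One-shot hash of the whole buffer, seed 0.
    MH_UINT32 expected = angle::PMurHash32(0, data, len);

    // Progressive hash: feed the 13-byte prefix, then the remainder.
    MH_UINT32 h1 = 0, carry = 0;
    angle::PMurHash32_Process(&h1, &carry, data, 13);
    angle::PMurHash32_Process(&h1, &carry, data + 13, len - 13);
    MH_UINT32 progressive = angle::PMurHash32_Result(h1, carry, static_cast<MH_UINT32>(len));

    assert(progressive == expected);
    return 0;
}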
diff --git a/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.h b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.h
new file mode 100644
index 0000000000..0a3c96fa14
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.h
@@ -0,0 +1,57 @@
+/*-----------------------------------------------------------------------------
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain.
+ *
+ * This implementation was written by Shane Day, and is also public domain.
+ *
+ * This is a portable ANSI C implementation of MurmurHash3_x86_32 (Murmur3A)
+ * with support for progressive processing.
+ */
+
+/* ------------------------------------------------------------------------- */
+/* Determine what native type to use for uint32_t */
+
+/* We can't use the name 'uint32_t' here because it will conflict with
+ * any version provided by the system headers or application. */
+
+/* First look for special cases */
+#if defined(_MSC_VER)
+# define MH_UINT32 unsigned long
+#endif
+
+/* If the compiler says it's C99 then take its word for it */
+#if !defined(MH_UINT32) && (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+# include <stdint.h>
+# define MH_UINT32 uint32_t
+#endif
+
+/* Otherwise try testing against max value macros from limits.h */
+#if !defined(MH_UINT32)
+# include <limits.h>
+# if (USHRT_MAX == 0xffffffffUL)
+# define MH_UINT32 unsigned short
+# elif (UINT_MAX == 0xffffffffUL)
+# define MH_UINT32 unsigned int
+# elif (ULONG_MAX == 0xffffffffUL)
+# define MH_UINT32 unsigned long
+# endif
+#endif
+
+#if !defined(MH_UINT32)
+# error Unable to determine type name for unsigned 32-bit int
+#endif
+
+/* I have yet to work on a platform where 'unsigned char' is not 8 bits */
+#define MH_UINT8 unsigned char
+
+/* ------------------------------------------------------------------------- */
+/* Prototypes */
+
+namespace angle
+{
+void PMurHash32_Process(MH_UINT32 *ph1, MH_UINT32 *pcarry, const void *key, int len);
+MH_UINT32 PMurHash32_Result(MH_UINT32 h1, MH_UINT32 carry, MH_UINT32 total_length);
+MH_UINT32 PMurHash32(MH_UINT32 seed, const void *key, int len);
+
+void PMurHash32_test(const void *key, int len, MH_UINT32 seed, void *out);
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.c b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.c
new file mode 100644
index 0000000000..ae9a55116c
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.c
@@ -0,0 +1,1030 @@
+/*
+* xxHash - Fast Hash algorithm
+* Copyright (C) 2012-2016, Yann Collet
+*
+* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following disclaimer
+* in the documentation and/or other materials provided with the
+* distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+* You can contact the author at :
+* - xxHash homepage: http://www.xxhash.com
+* - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+
+/* *************************************
+* Tuning parameters
+***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on the compiler but violates the C standard.
+ * It can generate buggy code on targets which do not support unaligned memory accesses.
+ * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define XXH_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
+ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7S__) ))
+# define XXH_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+/*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault.
+ * When this macro is enabled, xxHash actively checks input for null pointer.
+ * If it is, the result for null input pointers is the same as for a zero-length input.
+ */
+#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
+# define XXH_ACCEPT_NULL_INPUT_POINTER 0
+#endif
+
+/*!XXH_FORCE_NATIVE_FORMAT :
+ * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
+ * Results are therefore identical for little-endian and big-endian CPU.
+ * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
+ * to improve speed for big-endian CPUs.
+ * This option has no impact on little-endian CPUs.
+ */
+#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
+# define XXH_FORCE_NATIVE_FORMAT 0
+#endif
+
+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash;
+ * set it to 0 when the input is guaranteed to be aligned,
+ * or when alignment doesn't matter for performance.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+# define XXH_FORCE_ALIGN_CHECK 0
+# else
+# define XXH_FORCE_ALIGN_CHECK 1
+# endif
+#endif
+
+
+/* *************************************
+* Includes & Memory related functions
+***************************************/
+/*! Modify the local functions below should you wish to use some other memory routines
+* for malloc(), free() */
+#include <stdlib.h>
+static void* XXH_malloc(size_t s) { return malloc(s); }
+static void XXH_free (void* p) { free(p); }
+/*! and for memcpy() */
+#include <string.h>
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+
+#include <assert.h> /* assert */
+
+#define XXH_STATIC_LINKING_ONLY
+#include "xxhash.h"
+
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# define FORCE_INLINE static __forceinline
+#else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+# else
+# define FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif
+
+
+/* *************************************
+* Basic Types
+***************************************/
+#ifndef MEM_MODULE
+# if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+# else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+# endif
+#endif
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; } __attribute__((packed)) unalign;
+static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+static U32 XXH_read32(const void* memPtr)
+{
+ U32 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+
+/* ****************************************
+* Compiler-specific Functions and Macros
+******************************************/
+#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
+#if defined(_MSC_VER)
+# define XXH_rotl32(x,r) _rotl(x,r)
+# define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
+#endif
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap32 __builtin_bswap32
+#else
+static U32 XXH_swap32 (U32 x)
+{
+ return ((x << 24) & 0xff000000 ) |
+ ((x << 8) & 0x00ff0000 ) |
+ ((x >> 8) & 0x0000ff00 ) |
+ ((x >> 24) & 0x000000ff );
+}
+#endif
+
+
+/* *************************************
+* Architecture Macros
+***************************************/
+typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
+
+/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+static int XXH_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+# define XXH_CPU_LITTLE_ENDIAN (XXH_isLittleEndian())
+#endif
+
+
+/* ***************************
+* Memory reads
+*****************************/
+typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
+
+FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+ else
+ return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
+}
+
+FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
+{
+ return XXH_readLE32_align(ptr, endian, XXH_unaligned);
+}
+
+static U32 XXH_readBE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+
+
+/* *************************************
+* Macros
+***************************************/
+#define XXH_STATIC_ASSERT(c) do { enum { XXH_sa = 1/(int)(!!(c)) }; } while(0) /* use after variable declarations */
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* *******************************************************************
+* 32-bit hash functions
+*********************************************************************/
+static const U32 PRIME32_1 = 2654435761U; /* 0b10011110001101110111100110110001 */
+static const U32 PRIME32_2 = 2246822519U; /* 0b10000101111010111100101001110111 */
+static const U32 PRIME32_3 = 3266489917U; /* 0b11000010101100101010111000111101 */
+static const U32 PRIME32_4 = 668265263U; /* 0b00100111110101001110101100101111 */
+static const U32 PRIME32_5 = 374761393U; /* 0b00010110010101100110011110110001 */
+
+static U32 XXH32_round(U32 seed, U32 input)
+{
+ seed += input * PRIME32_2;
+ seed = XXH_rotl32(seed, 13);
+ seed *= PRIME32_1;
+ return seed;
+}
+
+/* mix all bits */
+static U32 XXH32_avalanche(U32 h32)
+{
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+ return(h32);
+}
+
+#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
+
+static U32
+XXH32_finalize(U32 h32, const void* ptr, size_t len,
+ XXH_endianess endian, XXH_alignment align)
+
+{
+ const BYTE* p = (const BYTE*)ptr;
+
+#define PROCESS1 \
+ h32 += (*p++) * PRIME32_5; \
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1
+
+#define PROCESS4 \
+ h32 += XXH_get32bits(p) * PRIME32_3; \
+ p+=4; \
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4
+
+ switch(len&15) /* or switch(bEnd - p) */
+ {
+ case 12: PROCESS4;
+ /* fallthrough */
+ case 8: PROCESS4;
+ /* fallthrough */
+ case 4: PROCESS4;
+ return XXH32_avalanche(h32);
+
+ case 13: PROCESS4;
+ /* fallthrough */
+ case 9: PROCESS4;
+ /* fallthrough */
+ case 5: PROCESS4;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 14: PROCESS4;
+ /* fallthrough */
+ case 10: PROCESS4;
+ /* fallthrough */
+ case 6: PROCESS4;
+ PROCESS1;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 15: PROCESS4;
+ /* fallthrough */
+ case 11: PROCESS4;
+ /* fallthrough */
+ case 7: PROCESS4;
+ /* fallthrough */
+ case 3: PROCESS1;
+ /* fallthrough */
+ case 2: PROCESS1;
+ /* fallthrough */
+ case 1: PROCESS1;
+ /* fallthrough */
+ case 0: return XXH32_avalanche(h32);
+ }
+ assert(0);
+ return h32; /* reaching this point is deemed impossible */
+}
+
+
+FORCE_INLINE U32
+XXH32_endian_align(const void* input, size_t len, U32 seed,
+ XXH_endianess endian, XXH_alignment align)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* bEnd = p + len;
+ U32 h32;
+
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ if (p==NULL) {
+ len=0;
+ bEnd=p=(const BYTE*)(size_t)16;
+ }
+#endif
+
+ if (len>=16) {
+ const BYTE* const limit = bEnd - 15;
+ U32 v1 = seed + PRIME32_1 + PRIME32_2;
+ U32 v2 = seed + PRIME32_2;
+ U32 v3 = seed + 0;
+ U32 v4 = seed - PRIME32_1;
+
+ do {
+ v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
+ v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
+ v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
+ v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
+ } while (p < limit);
+
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
+ + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ } else {
+ h32 = seed + PRIME32_5;
+ }
+
+ h32 += (U32)len;
+
+ return XXH32_finalize(h32, p, len&15, endian, align);
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
+{
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH32_state_t state;
+ XXH32_reset(&state, seed);
+ XXH32_update(&state, input, len);
+ return XXH32_digest(&state);
+#else
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+ else
+ return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+ } }
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+ else
+ return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+
+
+/*====== Hash streaming ======*/
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+ return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
+{
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
+{
+ XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state));
+ state.v1 = seed + PRIME32_1 + PRIME32_2;
+ state.v2 = seed + PRIME32_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME32_1;
+ /* do not write into reserved, planned to be removed in a future version */
+ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
+ return XXH_OK;
+}
+
+
+FORCE_INLINE XXH_errorcode
+XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+ if (input==NULL)
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ return XXH_OK;
+#else
+ return XXH_ERROR;
+#endif
+
+ { const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+
+ state->total_len_32 += (unsigned)len;
+ state->large_len |= (len>=16) | (state->total_len_32>=16);
+
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
+ state->memsize += (unsigned)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* some data left from previous update */
+ XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
+ { const U32* p32 = state->mem32;
+ state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
+ state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
+ state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
+ state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian));
+ }
+ p += 16-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p <= bEnd-16) {
+ const BYTE* const limit = bEnd - 16;
+ U32 v1 = state->v1;
+ U32 v2 = state->v2;
+ U32 v3 = state->v3;
+ U32 v4 = state->v4;
+
+ do {
+ v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
+ v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
+ v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
+ v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
+ else
+ return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+FORCE_INLINE U32
+XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
+{
+ U32 h32;
+
+ if (state->large_len) {
+ h32 = XXH_rotl32(state->v1, 1)
+ + XXH_rotl32(state->v2, 7)
+ + XXH_rotl32(state->v3, 12)
+ + XXH_rotl32(state->v4, 18);
+ } else {
+ h32 = state->v3 /* == seed */ + PRIME32_5;
+ }
+
+ h32 += state->total_len_32;
+
+ return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned);
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_digest_endian(state_in, XXH_littleEndian);
+ else
+ return XXH32_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/*====== Canonical representation ======*/
+
+/*! Default XXH result types are basic unsigned 32 and 64 bits.
+* The canonical representation follows human-readable write convention, aka big-endian (large digits first).
+* These functions allow transformation of hash result into and from its canonical format.
+* This way, hash values can be written into a file or buffer, remaining comparable across different systems.
+*/
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+ return XXH_readBE32(src);
+}
+
+
+#ifndef XXH_NO_LONG_LONG
+
+/* *******************************************************************
+* 64-bit hash functions
+*********************************************************************/
+
+/*====== Memory access ======*/
+
+#ifndef MEM_MODULE
+# define MEM_MODULE
+# if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint64_t U64;
+# else
+ /* if compiler doesn't support unsigned long long, replace by another 64-bit type */
+ typedef unsigned long long U64;
+# endif
+#endif
+
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
+static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+
+static U64 XXH_read64(const void* memPtr)
+{
+ U64 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap64 _byteswap_uint64
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap64 __builtin_bswap64
+#else
+static U64 XXH_swap64 (U64 x)
+{
+ return ((x << 56) & 0xff00000000000000ULL) |
+ ((x << 40) & 0x00ff000000000000ULL) |
+ ((x << 24) & 0x0000ff0000000000ULL) |
+ ((x << 8) & 0x000000ff00000000ULL) |
+ ((x >> 8) & 0x00000000ff000000ULL) |
+ ((x >> 24) & 0x0000000000ff0000ULL) |
+ ((x >> 40) & 0x000000000000ff00ULL) |
+ ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+ else
+ return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
+}
+
+FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
+{
+ return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+}
+
+static U64 XXH_readBE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+
+
+/*====== xxh64 ======*/
+
+static const U64 PRIME64_1 = 11400714785074694791ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
+static const U64 PRIME64_2 = 14029467366897019727ULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
+static const U64 PRIME64_3 = 1609587929392839161ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
+static const U64 PRIME64_4 = 9650029242287828579ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
+static const U64 PRIME64_5 = 2870177450012600261ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
+
+static U64 XXH64_round(U64 acc, U64 input)
+{
+ acc += input * PRIME64_2;
+ acc = XXH_rotl64(acc, 31);
+ acc *= PRIME64_1;
+ return acc;
+}
+
+static U64 XXH64_mergeRound(U64 acc, U64 val)
+{
+ val = XXH64_round(0, val);
+ acc ^= val;
+ acc = acc * PRIME64_1 + PRIME64_4;
+ return acc;
+}
+
+static U64 XXH64_avalanche(U64 h64)
+{
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+ return h64;
+}
+
+
+#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+static U64
+XXH64_finalize(U64 h64, const void* ptr, size_t len,
+ XXH_endianess endian, XXH_alignment align)
+{
+ const BYTE* p = (const BYTE*)ptr;
+
+#define PROCESS1_64 \
+ h64 ^= (*p++) * PRIME64_5; \
+ h64 = XXH_rotl64(h64, 11) * PRIME64_1
+
+#define PROCESS4_64 \
+ h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \
+ p+=4; \
+ h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3
+
+#define PROCESS8_64 do { \
+ U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \
+ p+=8; \
+ h64 ^= k1; \
+ h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
+} while (0)
+
+ switch(len&31) {
+ case 24: PROCESS8_64;
+ /* fallthrough */
+ case 16: PROCESS8_64;
+ /* fallthrough */
+ case 8: PROCESS8_64;
+ return XXH64_avalanche(h64);
+
+ case 28: PROCESS8_64;
+ /* fallthrough */
+ case 20: PROCESS8_64;
+ /* fallthrough */
+ case 12: PROCESS8_64;
+ /* fallthrough */
+ case 4: PROCESS4_64;
+ return XXH64_avalanche(h64);
+
+ case 25: PROCESS8_64;
+ /* fallthrough */
+ case 17: PROCESS8_64;
+ /* fallthrough */
+ case 9: PROCESS8_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 29: PROCESS8_64;
+ /* fallthrough */
+ case 21: PROCESS8_64;
+ /* fallthrough */
+ case 13: PROCESS8_64;
+ /* fallthrough */
+ case 5: PROCESS4_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 26: PROCESS8_64;
+ /* fallthrough */
+ case 18: PROCESS8_64;
+ /* fallthrough */
+ case 10: PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 30: PROCESS8_64;
+ /* fallthrough */
+ case 22: PROCESS8_64;
+ /* fallthrough */
+ case 14: PROCESS8_64;
+ /* fallthrough */
+ case 6: PROCESS4_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 27: PROCESS8_64;
+ /* fallthrough */
+ case 19: PROCESS8_64;
+ /* fallthrough */
+ case 11: PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 31: PROCESS8_64;
+ /* fallthrough */
+ case 23: PROCESS8_64;
+ /* fallthrough */
+ case 15: PROCESS8_64;
+ /* fallthrough */
+ case 7: PROCESS4_64;
+ /* fallthrough */
+ case 3: PROCESS1_64;
+ /* fallthrough */
+ case 2: PROCESS1_64;
+ /* fallthrough */
+ case 1: PROCESS1_64;
+ /* fallthrough */
+ case 0: return XXH64_avalanche(h64);
+ }
+
+ /* impossible to reach */
+ assert(0);
+ return 0; /* unreachable, but some compilers complain without it */
+}
+
+FORCE_INLINE U64
+XXH64_endian_align(const void* input, size_t len, U64 seed,
+ XXH_endianess endian, XXH_alignment align)
+{
+ const BYTE* p = (const BYTE*)input;
+ const BYTE* bEnd = p + len;
+ U64 h64;
+
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ if (p==NULL) {
+ len=0;
+ bEnd=p=(const BYTE*)(size_t)32;
+ }
+#endif
+
+ if (len>=32) {
+ const BYTE* const limit = bEnd - 32;
+ U64 v1 = seed + PRIME64_1 + PRIME64_2;
+ U64 v2 = seed + PRIME64_2;
+ U64 v3 = seed + 0;
+ U64 v4 = seed - PRIME64_1;
+
+ do {
+ v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
+ v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
+ v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
+ v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
+ } while (p<=limit);
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += (U64) len;
+
+ return XXH64_finalize(h64, p, len, endian, align);
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
+{
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH64_state_t state;
+ XXH64_reset(&state, seed);
+ XXH64_update(&state, input, len);
+ return XXH64_digest(&state);
+#else
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+ else
+ return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+ } }
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+ else
+ return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+/*====== Hash Streaming ======*/
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+ return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
+{
+ XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state));
+ state.v1 = seed + PRIME64_1 + PRIME64_2;
+ state.v2 = seed + PRIME64_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME64_1;
+ /* do not write into reserved, planned to be removed in a future version */
+ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
+ return XXH_OK;
+}
+
+FORCE_INLINE XXH_errorcode
+XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+ if (input==NULL)
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ return XXH_OK;
+#else
+ return XXH_ERROR;
+#endif
+
+ { const BYTE* p = (const BYTE*)input;
+ const BYTE* const bEnd = p + len;
+
+ state->total_len += len;
+
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+ state->memsize += (U32)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* tmp buffer is full */
+ XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
+ state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
+ state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
+ state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
+ state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
+ p += 32-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p+32 <= bEnd) {
+ const BYTE* const limit = bEnd - 32;
+ U64 v1 = state->v1;
+ U64 v2 = state->v2;
+ U64 v3 = state->v3;
+ U64 v4 = state->v4;
+
+ do {
+ v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
+ v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
+ v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
+ v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
+ else
+ return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
+{
+ U64 h64;
+
+ if (state->total_len >= 32) {
+ U64 const v1 = state->v1;
+ U64 const v2 = state->v2;
+ U64 const v3 = state->v3;
+ U64 const v4 = state->v4;
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+ } else {
+ h64 = state->v3 /*seed*/ + PRIME64_5;
+ }
+
+ h64 += (U64) state->total_len;
+
+ return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned);
+}
+
+XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
+{
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_digest_endian(state_in, XXH_littleEndian);
+ else
+ return XXH64_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/*====== Canonical representation ======*/
+
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
+{
+ return XXH_readBE64(src);
+}
+
+#endif /* XXH_NO_LONG_LONG */
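A hedged sketch of the one-shot entry points and the canonical (big-endian) representation implemented above; the buffer and seed are illustrative only:

#include <cstdio>
#include <cstring>
#include "common/third_party/xxhash/xxhash.h"  // path as added by this patch

int main()
{
    const char buf[] = "hello xxhash";
    const size_t len = std::strlen(buf);

    // One-shot hashes; the seed perturbs the result predictably.
    const unsigned int       h32 = XXH32(buf, len, 0);
    const unsigned long long h64 = XXH64(buf, len, 0);

    // The canonical form is byte-order independent and safe to store on disk;
    // it round-trips back to the same value on any platform.
    XXH32_canonical_t canonical;
    XXH32_canonicalFromHash(&canonical, h32);
    if (XXH32_hashFromCanonical(&canonical) != h32)
        return 1;

    std::printf("XXH32=%08x XXH64=%016llx\n", h32, h64);
    return 0;
}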
diff --git a/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.h b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.h
new file mode 100644
index 0000000000..0de203c947
--- /dev/null
+++ b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.h
@@ -0,0 +1,341 @@
+/*
+ xxHash - Extremely Fast Hash algorithm
+ Header File
+ Copyright (C) 2012-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+/* Notice extracted from xxHash homepage :
+
+xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+It also successfully passes all tests from the SMHasher suite.
+
+Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
+
+Name Speed Q.Score Author
+xxHash 5.4 GB/s 10
+CrapWow 3.2 GB/s 2 Andrew
+MurmurHash 3a   2.7 GB/s    10  Austin Appleby
+SpookyHash 2.0 GB/s 10 Bob Jenkins
+SBox 1.4 GB/s 9 Bret Mulvey
+Lookup3 1.2 GB/s 9 Bob Jenkins
+SuperFastHash 1.2 GB/s 1 Paul Hsieh
+CityHash64 1.05 GB/s 10 Pike & Alakuijala
+FNV 0.55 GB/s 5 Fowler, Noll, Vo
+CRC32 0.43 GB/s 9
+MD5-32 0.33 GB/s 10 Ronald L. Rivest
+SHA1-32 0.28 GB/s 10
+
+Q.Score is a measure of quality of the hash function.
+It depends on successfully passing SMHasher test set.
+10 is a perfect score.
+
+A 64-bit version, named XXH64, is available since r35.
+It offers much better speed, but for 64-bit applications only.
+Name Speed on 64 bits Speed on 32 bits
+XXH64 13.8 GB/s 1.9 GB/s
+XXH32 6.8 GB/s 6.0 GB/s
+*/
+
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* ****************************
+ * Definitions
+ ******************************/
+#include <stddef.h> /* size_t */
+typedef enum
+{
+ XXH_OK = 0,
+ XXH_ERROR
+} XXH_errorcode;
+
+/* ****************************
+ * API modifier
+ ******************************/
+/** XXH_INLINE_ALL (and XXH_PRIVATE_API)
+ * This is useful to include xxhash functions in `static` mode
+ * in order to inline them, and remove their symbol from the public list.
+ * Inlining can offer dramatic performance improvement on small keys.
+ * Methodology :
+ * #define XXH_INLINE_ALL
+ * #include "xxhash.h"
+ * `xxhash.c` is automatically included.
+ * It's not useful to compile and link it as a separate module.
+ */
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+# ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY
+# endif
+# if defined(__GNUC__)
+# define XXH_PUBLIC_API static __inline __attribute__((unused))
+#elif defined(__cplusplus) || (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 \
+ */)
+# define XXH_PUBLIC_API static inline
+# elif defined(_MSC_VER)
+# define XXH_PUBLIC_API static __inline
+# else
+/* this version may generate warnings for unused static functions */
+# define XXH_PUBLIC_API static
+# endif
+#else
+# define XXH_PUBLIC_API /* do nothing */
+#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
+
+/*! XXH_NAMESPACE, aka Namespace Emulation :
+ *
+ * If you want to include _and expose_ xxHash functions from within your own library,
+ * but also want to avoid symbol collisions with other libraries which may also include xxHash,
+ *
+ * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library
+ * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).
+ *
+ * Note that no change is required within the calling program as long as it includes `xxhash.h` :
+ * regular symbol names will be automatically translated by this header.
+ */
+#ifdef XXH_NAMESPACE
+# define XXH_CAT(A, B) A##B
+# define XXH_NAME2(A, B) XXH_CAT(A, B)
+# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+#endif
+
+/* *************************************
+ * Version
+ ***************************************/
+#define XXH_VERSION_MAJOR 0
+#define XXH_VERSION_MINOR 6
+#define XXH_VERSION_RELEASE 5
+#define XXH_VERSION_NUMBER \
+ (XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + XXH_VERSION_RELEASE)
+XXH_PUBLIC_API unsigned XXH_versionNumber(void);
+
+/*-**********************************************************************
+ * 32-bit hash
+ ************************************************************************/
+typedef unsigned int XXH32_hash_t;
+
+/*! XXH32() :
+    Calculate the 32-bit hash of a sequence of "length" bytes stored at memory address "input".
+ The memory between input & input+length must be valid (allocated and read-accessible).
+ "seed" can be used to alter the result predictably.
+ Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */
+XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input, size_t length, unsigned int seed);
+
+/*====== Streaming ======*/
+typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
+XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr);
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dst_state, const XXH32_state_t *src_state);
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, unsigned int seed);
+XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *statePtr,
+ const void *input,
+ size_t length);
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *statePtr);
+
+/*
+ * Streaming functions generate the xxHash of an input provided in multiple segments.
+ * Note that, for small input, they are slower than single-call functions, due to state
+ * management. For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
+ *
+ * XXH state must first be allocated, using XXH*_createState() .
+ *
+ * Start a new hash by initializing state with a seed, using XXH*_reset().
+ *
+ * Then, feed the hash state by calling XXH*_update() as many times as necessary.
+ * The function returns an error code, with 0 meaning OK, and any other value meaning there is
+ * an error.
+ *
+ * Finally, a hash value can be produced anytime, by using XXH*_digest().
+ * This function returns the nn-bit hash as an int or long long.
+ *
+ * It's still possible to continue inserting input into the hash state after a digest,
+ * and generate new hashes later on, by calling XXH*_digest() again.
+ *
+ * When done, free XXH state space if it was allocated dynamically.
+ */
+
+/*====== Canonical representation ======*/
+
+typedef struct
+{
+ unsigned char digest[4];
+} XXH32_canonical_t;
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst, XXH32_hash_t hash);
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t *src);
+
+/* Default result types for XXH functions are primitive unsigned 32 and 64 bit integers.
+ * The canonical representation uses human-readable write convention, aka big-endian (large
+ * digits first). These functions allow transformation of hash result into and from its
+ * canonical format. This way, hash values can be written into a file / memory, and remain
+ * comparable on different systems and programs.
+ */
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+ * 64-bit hash
+ ************************************************************************/
+typedef unsigned long long XXH64_hash_t;
+
+/*! XXH64() :
+    Calculate the 64-bit hash of a sequence of "length" bytes stored at memory address "input".
+ "seed" can be used to alter the result predictably.
+ This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
+*/
+XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input, size_t length, unsigned long long seed);
+
+/*====== Streaming ======*/
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr);
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dst_state, const XXH64_state_t *src_state);
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr, unsigned long long seed);
+XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *statePtr,
+ const void *input,
+ size_t length);
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *statePtr);
+
+/*====== Canonical representation ======*/
+typedef struct
+{
+ unsigned char digest[8];
+} XXH64_canonical_t;
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst, XXH64_hash_t hash);
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t *src);
+#endif /* XXH_NO_LONG_LONG */
+
+#ifdef XXH_STATIC_LINKING_ONLY
+
+/* ================================================================================================
+ This section contains declarations which are not guaranteed to remain stable.
+ They may change in future versions, becoming incompatible with a different version of the
+library. These declarations should only be used with static linking. Never use them in
+association with dynamic linking!
+===================================================================================================
+*/
+
+/* These definitions are only present to allow
+ * static allocation of XXH state, on stack or in a struct for example.
+ * Never **ever** use members directly. */
+
+# if !defined(__VMS) && (defined(__cplusplus) || (defined(__STDC_VERSION__) && \
+ (__STDC_VERSION__ >= 199901L) /* C99 */))
+# include <stdint.h>
+
+struct XXH32_state_s
+{
+ uint32_t total_len_32;
+ uint32_t large_len;
+ uint32_t v1;
+ uint32_t v2;
+ uint32_t v3;
+ uint32_t v4;
+ uint32_t mem32[4];
+ uint32_t memsize;
+ uint32_t reserved; /* never read nor write, might be removed in a future version */
+}; /* typedef'd to XXH32_state_t */
+
+struct XXH64_state_s
+{
+ uint64_t total_len;
+ uint64_t v1;
+ uint64_t v2;
+ uint64_t v3;
+ uint64_t v4;
+ uint64_t mem64[4];
+ uint32_t memsize;
+ uint32_t reserved[2]; /* never read nor write, might be removed in a future version */
+}; /* typedef'd to XXH64_state_t */
+
+# else
+
+struct XXH32_state_s
+{
+ unsigned total_len_32;
+ unsigned large_len;
+ unsigned v1;
+ unsigned v2;
+ unsigned v3;
+ unsigned v4;
+ unsigned mem32[4];
+ unsigned memsize;
+ unsigned reserved; /* never read nor write, might be removed in a future version */
+}; /* typedef'd to XXH32_state_t */
+
+# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */
+struct XXH64_state_s
+{
+ unsigned long long total_len;
+ unsigned long long v1;
+ unsigned long long v2;
+ unsigned long long v3;
+ unsigned long long v4;
+ unsigned long long mem64[4];
+ unsigned memsize;
+ unsigned reserved[2]; /* never read nor write, might be removed in a future version */
+}; /* typedef'd to XXH64_state_t */
+# endif
+
+# endif
+
+# if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */
+# endif
+
+#endif /* XXH_STATIC_LINKING_ONLY */
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* XXHASH_H_5627135585666179 */
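The streaming workflow spelled out in the comment block above, as a hedged sketch; the two-chunk split is illustrative only, and the result equals hashing the concatenated input in a single call:

#include <cstring>
#include "common/third_party/xxhash/xxhash.h"  // path as added by this patch

unsigned long long HashInTwoChunks(const char *first, const char *second)
{
    // Allocate and seed the state, feed each chunk, then digest and free.
    XXH64_state_t *state = XXH64_createState();
    XXH64_reset(state, 0);
    XXH64_update(state, first, std::strlen(first));
    XXH64_update(state, second, std::strlen(second));
    const unsigned long long hash = XXH64_digest(state);
    XXH64_freeState(state);
    return hash;
}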
diff --git a/gfx/angle/checkout/src/common/tls.cpp b/gfx/angle/checkout/src/common/tls.cpp
new file mode 100644
index 0000000000..458f5b816f
--- /dev/null
+++ b/gfx/angle/checkout/src/common/tls.cpp
@@ -0,0 +1,156 @@
+//
+// Copyright 2014 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// tls.cpp: Simple cross-platform interface for thread local storage.
+
+#include "common/tls.h"
+
+#include "common/debug.h"
+
+#ifdef ANGLE_ENABLE_WINDOWS_UWP
+# include <map>
+# include <mutex>
+# include <set>
+# include <vector>
+
+# include <Windows.System.Threading.h>
+# include <wrl/async.h>
+# include <wrl/client.h>
+
+using namespace std;
+using namespace Windows::Foundation;
+using namespace ABI::Windows::System::Threading;
+
+// Thread local storage for Windows Store support
+typedef vector<void *> ThreadLocalData;
+
+static __declspec(thread) ThreadLocalData *currentThreadData = nullptr;
+static set<ThreadLocalData *> allThreadData;
+static DWORD nextTlsIndex = 0;
+static vector<DWORD> freeTlsIndices;
+
+#endif
+
+TLSIndex CreateTLSIndex(PthreadKeyDestructor destructor)
+{
+ TLSIndex index;
+
+#ifdef ANGLE_PLATFORM_WINDOWS
+# ifdef ANGLE_ENABLE_WINDOWS_UWP
+ if (!freeTlsIndices.empty())
+ {
+ DWORD result = freeTlsIndices.back();
+ freeTlsIndices.pop_back();
+ index = result;
+ }
+ else
+ {
+ index = nextTlsIndex++;
+ }
+# else
+ index = TlsAlloc();
+# endif
+
+#elif defined(ANGLE_PLATFORM_POSIX)
+ // Create pthread key
+ if ((pthread_key_create(&index, destructor)) != 0)
+ {
+ index = TLS_INVALID_INDEX;
+ }
+#endif
+
+ ASSERT(index != TLS_INVALID_INDEX && "CreateTLSIndex: Unable to allocate Thread Local Storage");
+ return index;
+}
+
+bool DestroyTLSIndex(TLSIndex index)
+{
+ ASSERT(index != TLS_INVALID_INDEX && "DestroyTLSIndex(): Invalid TLS Index");
+ if (index == TLS_INVALID_INDEX)
+ {
+ return false;
+ }
+
+#ifdef ANGLE_PLATFORM_WINDOWS
+# ifdef ANGLE_ENABLE_WINDOWS_UWP
+ ASSERT(index < nextTlsIndex);
+ ASSERT(find(freeTlsIndices.begin(), freeTlsIndices.end(), index) == freeTlsIndices.end());
+
+ freeTlsIndices.push_back(index);
+ for (auto threadData : allThreadData)
+ {
+ if (threadData->size() > index)
+ {
+ threadData->at(index) = nullptr;
+ }
+ }
+ return true;
+# else
+ return (TlsFree(index) == TRUE);
+# endif
+#elif defined(ANGLE_PLATFORM_POSIX)
+ return (pthread_key_delete(index) == 0);
+#endif
+}
+
+bool SetTLSValue(TLSIndex index, void *value)
+{
+ ASSERT(index != TLS_INVALID_INDEX && "SetTLSValue(): Invalid TLS Index");
+ if (index == TLS_INVALID_INDEX)
+ {
+ return false;
+ }
+
+#ifdef ANGLE_PLATFORM_WINDOWS
+# ifdef ANGLE_ENABLE_WINDOWS_UWP
+ ThreadLocalData *threadData = currentThreadData;
+ if (!threadData)
+ {
+ threadData = new ThreadLocalData(index + 1, nullptr);
+ allThreadData.insert(threadData);
+ currentThreadData = threadData;
+ }
+ else if (threadData->size() <= index)
+ {
+ threadData->resize(index + 1, nullptr);
+ }
+
+ threadData->at(index) = value;
+ return true;
+# else
+ return (TlsSetValue(index, value) == TRUE);
+# endif
+#elif defined(ANGLE_PLATFORM_POSIX)
+ return (pthread_setspecific(index, value) == 0);
+#endif
+}
+
+void *GetTLSValue(TLSIndex index)
+{
+ ASSERT(index != TLS_INVALID_INDEX && "GetTLSValue(): Invalid TLS Index");
+ if (index == TLS_INVALID_INDEX)
+ {
+ return nullptr;
+ }
+
+#ifdef ANGLE_PLATFORM_WINDOWS
+# ifdef ANGLE_ENABLE_WINDOWS_UWP
+ ThreadLocalData *threadData = currentThreadData;
+ if (threadData && threadData->size() > index)
+ {
+ return threadData->at(index);
+ }
+ else
+ {
+ return nullptr;
+ }
+# else
+ return TlsGetValue(index);
+# endif
+#elif defined(ANGLE_PLATFORM_POSIX)
+ return pthread_getspecific(index);
+#endif
+}
diff --git a/gfx/angle/checkout/src/common/tls.h b/gfx/angle/checkout/src/common/tls.h
new file mode 100644
index 0000000000..4075f3c030
--- /dev/null
+++ b/gfx/angle/checkout/src/common/tls.h
@@ -0,0 +1,54 @@
+//
+// Copyright 2014 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// tls.h: Simple cross-platform interface for thread local storage.
+
+#ifndef COMMON_TLS_H_
+#define COMMON_TLS_H_
+
+#include "common/angleutils.h"
+#include "common/platform.h"
+
+#ifdef ANGLE_PLATFORM_WINDOWS
+# include <windows.h>
+#endif
+
+namespace gl
+{
+class Context;
+}
+
+#ifdef ANGLE_PLATFORM_WINDOWS
+
+// TLS does not exist for Windows Store and needs to be emulated
+# ifdef ANGLE_ENABLE_WINDOWS_UWP
+# ifndef TLS_OUT_OF_INDEXES
+# define TLS_OUT_OF_INDEXES static_cast<DWORD>(0xFFFFFFFF)
+# endif
+# ifndef CREATE_SUSPENDED
+# define CREATE_SUSPENDED 0x00000004
+# endif
+# endif
+typedef DWORD TLSIndex;
+# define TLS_INVALID_INDEX (TLS_OUT_OF_INDEXES)
+#elif defined(ANGLE_PLATFORM_POSIX)
+# include <errno.h>
+# include <pthread.h>
+# include <semaphore.h>
+typedef pthread_key_t TLSIndex;
+# define TLS_INVALID_INDEX (static_cast<TLSIndex>(-1))
+#else
+# error Unsupported platform.
+#endif
+
+using PthreadKeyDestructor = void (*)(void *);
+TLSIndex CreateTLSIndex(PthreadKeyDestructor destructor);
+bool DestroyTLSIndex(TLSIndex index);
+
+bool SetTLSValue(TLSIndex index, void *value);
+void *GetTLSValue(TLSIndex index);
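+
+// Minimal usage sketch (illustrative only): allocate a slot once, then each thread
+// stores and reads its own pointer through it. The destructor argument is honored
+// only by the pthread backend; pass nullptr when no per-thread cleanup is needed.
+//
+//     TLSIndex index = CreateTLSIndex(nullptr);
+//     if (index != TLS_INVALID_INDEX)
+//     {
+//         SetTLSValue(index, somePointer);    // somePointer is a caller-owned example value
+//         void *stored = GetTLSValue(index);  // returns somePointer on this thread
+//         DestroyTLSIndex(index);             // release the slot when done
+//     }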
+
+#endif // COMMON_TLS_H_
diff --git a/gfx/angle/checkout/src/common/uniform_type_info_autogen.cpp b/gfx/angle/checkout/src/common/uniform_type_info_autogen.cpp
new file mode 100644
index 0000000000..70249981a8
--- /dev/null
+++ b/gfx/angle/checkout/src/common/uniform_type_info_autogen.cpp
@@ -0,0 +1,378 @@
+// GENERATED FILE - DO NOT EDIT.
+// Generated by gen_uniform_type_table.py.
+//
+// Copyright 2017 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Uniform type info table:
+// Metadata about a particular uniform format, indexed by GL type.
+
+#include <array>
+#include "common/utilities.h"
+
+using namespace angle;
+
+namespace gl
+{
+
+namespace
+{
+constexpr std::array<UniformTypeInfo, 77> kInfoTable = {
+ {{GL_NONE, GL_NONE, GL_NONE, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 0, 0, 0, 0, 0 * 0,
+ 0 * 0, false, false, false},
+ {GL_BOOL, GL_BOOL, GL_NONE, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, false},
+ {GL_BOOL_VEC2, GL_BOOL, GL_NONE, GL_NONE, GL_BOOL_VEC2, SamplerFormat::InvalidEnum, 1, 2, 2,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 2, false, false, false},
+ {GL_BOOL_VEC3, GL_BOOL, GL_NONE, GL_NONE, GL_BOOL_VEC3, SamplerFormat::InvalidEnum, 1, 3, 3,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 3, false, false, false},
+ {GL_BOOL_VEC4, GL_BOOL, GL_NONE, GL_NONE, GL_BOOL_VEC4, SamplerFormat::InvalidEnum, 1, 4, 4,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 4, false, false, false},
+ {GL_FLOAT, GL_FLOAT, GL_NONE, GL_NONE, GL_BOOL, SamplerFormat::InvalidEnum, 1, 1, 1,
+ sizeof(GLfloat), sizeof(GLfloat) * 4, sizeof(GLfloat) * 1, false, false, false},
+ {GL_FLOAT_MAT2, GL_FLOAT, GL_NONE, GL_FLOAT_MAT2, GL_NONE, SamplerFormat::InvalidEnum, 2, 2, 4,
+ sizeof(GLfloat), sizeof(GLfloat) * 8, sizeof(GLfloat) * 4, false, true, false},
+ {GL_FLOAT_MAT2x3, GL_FLOAT, GL_NONE, GL_FLOAT_MAT3x2, GL_NONE, SamplerFormat::InvalidEnum, 3,
+ 2, 6, sizeof(GLfloat), sizeof(GLfloat) * 12, sizeof(GLfloat) * 6, false, true, false},
+ {GL_FLOAT_MAT2x4, GL_FLOAT, GL_NONE, GL_FLOAT_MAT4x2, GL_NONE, SamplerFormat::InvalidEnum, 4,
+ 2, 8, sizeof(GLfloat), sizeof(GLfloat) * 16, sizeof(GLfloat) * 8, false, true, false},
+ {GL_FLOAT_MAT3, GL_FLOAT, GL_NONE, GL_FLOAT_MAT3, GL_NONE, SamplerFormat::InvalidEnum, 3, 3, 9,
+ sizeof(GLfloat), sizeof(GLfloat) * 12, sizeof(GLfloat) * 9, false, true, false},
+ {GL_FLOAT_MAT3x2, GL_FLOAT, GL_NONE, GL_FLOAT_MAT2x3, GL_NONE, SamplerFormat::InvalidEnum, 2,
+ 3, 6, sizeof(GLfloat), sizeof(GLfloat) * 8, sizeof(GLfloat) * 6, false, true, false},
+ {GL_FLOAT_MAT3x4, GL_FLOAT, GL_NONE, GL_FLOAT_MAT4x3, GL_NONE, SamplerFormat::InvalidEnum, 4,
+ 3, 12, sizeof(GLfloat), sizeof(GLfloat) * 16, sizeof(GLfloat) * 12, false, true, false},
+ {GL_FLOAT_MAT4, GL_FLOAT, GL_NONE, GL_FLOAT_MAT4, GL_NONE, SamplerFormat::InvalidEnum, 4, 4,
+ 16, sizeof(GLfloat), sizeof(GLfloat) * 16, sizeof(GLfloat) * 16, false, true, false},
+ {GL_FLOAT_MAT4x2, GL_FLOAT, GL_NONE, GL_FLOAT_MAT2x4, GL_NONE, SamplerFormat::InvalidEnum, 2,
+ 4, 8, sizeof(GLfloat), sizeof(GLfloat) * 8, sizeof(GLfloat) * 8, false, true, false},
+ {GL_FLOAT_MAT4x3, GL_FLOAT, GL_NONE, GL_FLOAT_MAT3x4, GL_NONE, SamplerFormat::InvalidEnum, 3,
+ 4, 12, sizeof(GLfloat), sizeof(GLfloat) * 12, sizeof(GLfloat) * 12, false, true, false},
+ {GL_FLOAT_VEC2, GL_FLOAT, GL_NONE, GL_NONE, GL_BOOL_VEC2, SamplerFormat::InvalidEnum, 1, 2, 2,
+ sizeof(GLfloat), sizeof(GLfloat) * 4, sizeof(GLfloat) * 2, false, false, false},
+ {GL_FLOAT_VEC3, GL_FLOAT, GL_NONE, GL_NONE, GL_BOOL_VEC3, SamplerFormat::InvalidEnum, 1, 3, 3,
+ sizeof(GLfloat), sizeof(GLfloat) * 4, sizeof(GLfloat) * 3, false, false, false},
+ {GL_FLOAT_VEC4, GL_FLOAT, GL_NONE, GL_NONE, GL_BOOL_VEC4, SamplerFormat::InvalidEnum, 1, 4, 4,
+ sizeof(GLfloat), sizeof(GLfloat) * 4, sizeof(GLfloat) * 4, false, false, false},
+ {GL_IMAGE_2D, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true},
+ {GL_IMAGE_2D_ARRAY, GL_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum,
+ 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true},
+ {GL_IMAGE_3D, GL_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true},
+ {GL_IMAGE_CUBE, GL_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1,
+ 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true},
+ {GL_IMAGE_CUBE_MAP_ARRAY, GL_INT, GL_TEXTURE_CUBE_MAP_ARRAY, GL_NONE, GL_NONE,
+ SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1,
+ false, false, true},
+ {GL_IMAGE_BUFFER, GL_INT, GL_TEXTURE_BUFFER, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1,
+ 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true},
+ {GL_INT, GL_INT, GL_NONE, GL_NONE, GL_BOOL, SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLint),
+ sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, false},
+ {GL_INT_IMAGE_2D, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true},
+ {GL_INT_IMAGE_2D_ARRAY, GL_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE,
+ SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1,
+ false, false, true},
+ {GL_INT_IMAGE_3D, GL_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true},
+ {GL_INT_IMAGE_CUBE, GL_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum,
+ 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true},
+ {GL_INT_IMAGE_CUBE_MAP_ARRAY, GL_INT, GL_TEXTURE_CUBE_MAP_ARRAY, GL_NONE, GL_NONE,
+ SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1,
+ false, false, true},
+ {GL_INT_IMAGE_BUFFER, GL_INT, GL_TEXTURE_BUFFER, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum,
+ 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true},
+ {GL_INT_SAMPLER_2D, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::Signed, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_INT_SAMPLER_2D_ARRAY, GL_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE, SamplerFormat::Signed,
+ 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_INT_SAMPLER_2D_MULTISAMPLE, GL_INT, GL_TEXTURE_2D_MULTISAMPLE, GL_NONE, GL_NONE,
+ SamplerFormat::Signed, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true,
+ false, false},
+ {GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY, GL_INT, GL_TEXTURE_2D_MULTISAMPLE_ARRAY, GL_NONE,
+ GL_NONE, SamplerFormat::Signed, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1,
+ true, false, false},
+ {GL_INT_SAMPLER_3D, GL_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE, SamplerFormat::Signed, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_INT_SAMPLER_CUBE, GL_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, SamplerFormat::Signed, 1,
+ 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_INT_SAMPLER_CUBE_MAP_ARRAY, GL_INT, GL_TEXTURE_CUBE_MAP_ARRAY, GL_NONE, GL_NONE,
+ SamplerFormat::Signed, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true,
+ false, false},
+ {GL_INT_SAMPLER_BUFFER, GL_INT, GL_TEXTURE_BUFFER, GL_NONE, GL_NONE, SamplerFormat::Signed, 1,
+ 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_INT_VEC2, GL_INT, GL_NONE, GL_NONE, GL_BOOL_VEC2, SamplerFormat::InvalidEnum, 1, 2, 2,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 2, false, false, false},
+ {GL_INT_VEC3, GL_INT, GL_NONE, GL_NONE, GL_BOOL_VEC3, SamplerFormat::InvalidEnum, 1, 3, 3,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 3, false, false, false},
+ {GL_INT_VEC4, GL_INT, GL_NONE, GL_NONE, GL_BOOL_VEC4, SamplerFormat::InvalidEnum, 1, 4, 4,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 4, false, false, false},
+ {GL_SAMPLER_2D, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::Float, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_SAMPLER_2D_ARRAY, GL_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE, SamplerFormat::Float, 1,
+ 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_SAMPLER_2D_ARRAY_SHADOW, GL_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE,
+ SamplerFormat::Shadow, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true,
+ false, false},
+ {GL_SAMPLER_2D_MULTISAMPLE, GL_INT, GL_TEXTURE_2D_MULTISAMPLE, GL_NONE, GL_NONE,
+ SamplerFormat::Float, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true,
+ false, false},
+ {GL_SAMPLER_2D_MULTISAMPLE_ARRAY, GL_INT, GL_TEXTURE_2D_MULTISAMPLE_ARRAY, GL_NONE, GL_NONE,
+ SamplerFormat::Float, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true,
+ false, false},
+ {GL_SAMPLER_2D_RECT_ANGLE, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::Float, 1, 1,
+ 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_SAMPLER_2D_SHADOW, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::Shadow, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_SAMPLER_3D, GL_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE, SamplerFormat::Float, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_SAMPLER_CUBE, GL_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, SamplerFormat::Float, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_SAMPLER_CUBE_MAP_ARRAY, GL_INT, GL_TEXTURE_CUBE_MAP_ARRAY, GL_NONE, GL_NONE,
+ SamplerFormat::Float, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true,
+ false, false},
+ {GL_SAMPLER_BUFFER, GL_INT, GL_TEXTURE_BUFFER, GL_NONE, GL_NONE, SamplerFormat::Float, 1, 1, 1,
+ sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_SAMPLER_CUBE_SHADOW, GL_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, SamplerFormat::Shadow,
+ 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW, GL_INT, GL_NONE, GL_NONE, GL_NONE, SamplerFormat::Shadow, 1,
+ 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false},
+ {GL_SAMPLER_EXTERNAL_OES, GL_INT, GL_TEXTURE_EXTERNAL_OES, GL_NONE, GL_NONE,
+ SamplerFormat::Float, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true,
+ false, false},
+ {GL_UNSIGNED_INT, GL_UNSIGNED_INT, GL_NONE, GL_NONE, GL_BOOL, SamplerFormat::InvalidEnum, 1, 1,
+ 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, false, false, false},
+ {GL_UNSIGNED_INT_ATOMIC_COUNTER, GL_UNSIGNED_INT, GL_NONE, GL_NONE, GL_NONE,
+ SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1,
+ false, false, false},
+ {GL_UNSIGNED_INT_IMAGE_2D, GL_UNSIGNED_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE,
+ SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1,
+ false, false, true},
+ {GL_UNSIGNED_INT_IMAGE_2D_ARRAY, GL_UNSIGNED_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE,
+ SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1,
+ false, false, true},
+ {GL_UNSIGNED_INT_IMAGE_3D, GL_UNSIGNED_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE,
+ SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1,
+ false, false, true},
+ {GL_UNSIGNED_INT_IMAGE_CUBE, GL_UNSIGNED_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE,
+ SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1,
+ false, false, true},
+ {GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY, GL_UNSIGNED_INT, GL_TEXTURE_CUBE_MAP_ARRAY, GL_NONE,
+ GL_NONE, SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4,
+ sizeof(GLuint) * 1, false, false, true},
+ {GL_UNSIGNED_INT_IMAGE_BUFFER, GL_UNSIGNED_INT, GL_TEXTURE_BUFFER, GL_NONE, GL_NONE,
+ SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1,
+ false, false, true},
+ {GL_UNSIGNED_INT_SAMPLER_2D, GL_UNSIGNED_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE,
+ SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1,
+ true, false, false},
+ {GL_UNSIGNED_INT_SAMPLER_2D_ARRAY, GL_UNSIGNED_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE,
+ SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1,
+ true, false, false},
+ {GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE, GL_UNSIGNED_INT, GL_TEXTURE_2D_MULTISAMPLE, GL_NONE,
+ GL_NONE, SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4,
+ sizeof(GLuint) * 1, true, false, false},
+ {GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY, GL_UNSIGNED_INT,
+ GL_TEXTURE_2D_MULTISAMPLE_ARRAY, GL_NONE, GL_NONE, SamplerFormat::Unsigned, 1, 1, 1,
+ sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, true, false, false},
+ {GL_UNSIGNED_INT_SAMPLER_3D, GL_UNSIGNED_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE,
+ SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1,
+ true, false, false},
+ {GL_UNSIGNED_INT_SAMPLER_CUBE, GL_UNSIGNED_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE,
+ SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1,
+ true, false, false},
+ {GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY, GL_UNSIGNED_INT, GL_TEXTURE_CUBE_MAP_ARRAY, GL_NONE,
+ GL_NONE, SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4,
+ sizeof(GLuint) * 1, true, false, false},
+ {GL_UNSIGNED_INT_SAMPLER_BUFFER, GL_UNSIGNED_INT, GL_TEXTURE_BUFFER, GL_NONE, GL_NONE,
+ SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1,
+ true, false, false},
+ {GL_UNSIGNED_INT_VEC2, GL_UNSIGNED_INT, GL_NONE, GL_NONE, GL_BOOL_VEC2,
+ SamplerFormat::InvalidEnum, 1, 2, 2, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 2,
+ false, false, false},
+ {GL_UNSIGNED_INT_VEC3, GL_UNSIGNED_INT, GL_NONE, GL_NONE, GL_BOOL_VEC3,
+ SamplerFormat::InvalidEnum, 1, 3, 3, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 3,
+ false, false, false},
+ {GL_UNSIGNED_INT_VEC4, GL_UNSIGNED_INT, GL_NONE, GL_NONE, GL_BOOL_VEC4,
+ SamplerFormat::InvalidEnum, 1, 4, 4, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 4,
+ false, false, false},
+ {GL_SAMPLER_VIDEO_IMAGE_WEBGL, GL_INT, GL_TEXTURE_VIDEO_IMAGE_WEBGL, GL_NONE, GL_NONE,
+ SamplerFormat::Float, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true,
+ false, false},
+ {GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT, GL_INT, GL_NONE, GL_NONE, GL_NONE, SamplerFormat::Float, 1, 1,
+ 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false}}};
+
+size_t GetTypeInfoIndex(GLenum uniformType)
+{
+ switch (uniformType)
+ {
+ case GL_NONE:
+ return 0;
+ case GL_BOOL:
+ return 1;
+ case GL_BOOL_VEC2:
+ return 2;
+ case GL_BOOL_VEC3:
+ return 3;
+ case GL_BOOL_VEC4:
+ return 4;
+ case GL_FLOAT:
+ return 5;
+ case GL_FLOAT_MAT2:
+ return 6;
+ case GL_FLOAT_MAT2x3:
+ return 7;
+ case GL_FLOAT_MAT2x4:
+ return 8;
+ case GL_FLOAT_MAT3:
+ return 9;
+ case GL_FLOAT_MAT3x2:
+ return 10;
+ case GL_FLOAT_MAT3x4:
+ return 11;
+ case GL_FLOAT_MAT4:
+ return 12;
+ case GL_FLOAT_MAT4x2:
+ return 13;
+ case GL_FLOAT_MAT4x3:
+ return 14;
+ case GL_FLOAT_VEC2:
+ return 15;
+ case GL_FLOAT_VEC3:
+ return 16;
+ case GL_FLOAT_VEC4:
+ return 17;
+ case GL_IMAGE_2D:
+ return 18;
+ case GL_IMAGE_2D_ARRAY:
+ return 19;
+ case GL_IMAGE_3D:
+ return 20;
+ case GL_IMAGE_CUBE:
+ return 21;
+ case GL_IMAGE_CUBE_MAP_ARRAY:
+ return 22;
+ case GL_IMAGE_BUFFER:
+ return 23;
+ case GL_INT:
+ return 24;
+ case GL_INT_IMAGE_2D:
+ return 25;
+ case GL_INT_IMAGE_2D_ARRAY:
+ return 26;
+ case GL_INT_IMAGE_3D:
+ return 27;
+ case GL_INT_IMAGE_CUBE:
+ return 28;
+ case GL_INT_IMAGE_CUBE_MAP_ARRAY:
+ return 29;
+ case GL_INT_IMAGE_BUFFER:
+ return 30;
+ case GL_INT_SAMPLER_2D:
+ return 31;
+ case GL_INT_SAMPLER_2D_ARRAY:
+ return 32;
+ case GL_INT_SAMPLER_2D_MULTISAMPLE:
+ return 33;
+ case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ return 34;
+ case GL_INT_SAMPLER_3D:
+ return 35;
+ case GL_INT_SAMPLER_CUBE:
+ return 36;
+ case GL_INT_SAMPLER_CUBE_MAP_ARRAY:
+ return 37;
+ case GL_INT_SAMPLER_BUFFER:
+ return 38;
+ case GL_INT_VEC2:
+ return 39;
+ case GL_INT_VEC3:
+ return 40;
+ case GL_INT_VEC4:
+ return 41;
+ case GL_SAMPLER_2D:
+ return 42;
+ case GL_SAMPLER_2D_ARRAY:
+ return 43;
+ case GL_SAMPLER_2D_ARRAY_SHADOW:
+ return 44;
+ case GL_SAMPLER_2D_MULTISAMPLE:
+ return 45;
+ case GL_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ return 46;
+ case GL_SAMPLER_2D_RECT_ANGLE:
+ return 47;
+ case GL_SAMPLER_2D_SHADOW:
+ return 48;
+ case GL_SAMPLER_3D:
+ return 49;
+ case GL_SAMPLER_CUBE:
+ return 50;
+ case GL_SAMPLER_CUBE_MAP_ARRAY:
+ return 51;
+ case GL_SAMPLER_BUFFER:
+ return 52;
+ case GL_SAMPLER_CUBE_SHADOW:
+ return 53;
+ case GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW:
+ return 54;
+ case GL_SAMPLER_EXTERNAL_OES:
+ return 55;
+ case GL_UNSIGNED_INT:
+ return 56;
+ case GL_UNSIGNED_INT_ATOMIC_COUNTER:
+ return 57;
+ case GL_UNSIGNED_INT_IMAGE_2D:
+ return 58;
+ case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+ return 59;
+ case GL_UNSIGNED_INT_IMAGE_3D:
+ return 60;
+ case GL_UNSIGNED_INT_IMAGE_CUBE:
+ return 61;
+ case GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY:
+ return 62;
+ case GL_UNSIGNED_INT_IMAGE_BUFFER:
+ return 63;
+ case GL_UNSIGNED_INT_SAMPLER_2D:
+ return 64;
+ case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+ return 65;
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+ return 66;
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ return 67;
+ case GL_UNSIGNED_INT_SAMPLER_3D:
+ return 68;
+ case GL_UNSIGNED_INT_SAMPLER_CUBE:
+ return 69;
+ case GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY:
+ return 70;
+ case GL_UNSIGNED_INT_SAMPLER_BUFFER:
+ return 71;
+ case GL_UNSIGNED_INT_VEC2:
+ return 72;
+ case GL_UNSIGNED_INT_VEC3:
+ return 73;
+ case GL_UNSIGNED_INT_VEC4:
+ return 74;
+ case GL_SAMPLER_VIDEO_IMAGE_WEBGL:
+ return 75;
+ case GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT:
+ return 76;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+} // anonymous namespace
+
+const UniformTypeInfo &GetUniformTypeInfo(GLenum uniformType)
+{
+ ASSERT(kInfoTable[GetTypeInfoIndex(uniformType)].type == uniformType);
+ return kInfoTable[GetTypeInfoIndex(uniformType)];
+}
+
+} // namespace gl
diff --git a/gfx/angle/checkout/src/common/utilities.cpp b/gfx/angle/checkout/src/common/utilities.cpp
new file mode 100644
index 0000000000..f08774523c
--- /dev/null
+++ b/gfx/angle/checkout/src/common/utilities.cpp
@@ -0,0 +1,1509 @@
+//
+// Copyright 2002 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// utilities.cpp: Conversion functions and other utility routines.
+
+#include "common/utilities.h"
+#include "GLES3/gl3.h"
+#include "common/mathutil.h"
+#include "common/platform.h"
+#include "common/string_utils.h"
+
+#include <set>
+
+#if defined(ANGLE_ENABLE_WINDOWS_UWP)
+# include <windows.applicationmodel.core.h>
+# include <windows.graphics.display.h>
+# include <wrl.h>
+# include <wrl/wrappers/corewrappers.h>
+#endif
+
+namespace
+{
+
+template <class IndexType>
+gl::IndexRange ComputeTypedIndexRange(const IndexType *indices,
+ size_t count,
+ bool primitiveRestartEnabled,
+ GLuint primitiveRestartIndex)
+{
+ ASSERT(count > 0);
+
+ IndexType minIndex = 0;
+ IndexType maxIndex = 0;
+ size_t nonPrimitiveRestartIndices = 0;
+
+ if (primitiveRestartEnabled)
+ {
+ // Find the first non-primitive restart index to initialize the min and max values
+ size_t i = 0;
+ for (; i < count; i++)
+ {
+ if (indices[i] != primitiveRestartIndex)
+ {
+ minIndex = indices[i];
+ maxIndex = indices[i];
+ nonPrimitiveRestartIndices++;
+ break;
+ }
+ }
+
+ // Loop over the rest of the indices
+ for (; i < count; i++)
+ {
+ if (indices[i] != primitiveRestartIndex)
+ {
+ if (minIndex > indices[i])
+ {
+ minIndex = indices[i];
+ }
+ if (maxIndex < indices[i])
+ {
+ maxIndex = indices[i];
+ }
+ nonPrimitiveRestartIndices++;
+ }
+ }
+ }
+ else
+ {
+ minIndex = indices[0];
+ maxIndex = indices[0];
+ nonPrimitiveRestartIndices = count;
+
+ for (size_t i = 1; i < count; i++)
+ {
+ if (minIndex > indices[i])
+ {
+ minIndex = indices[i];
+ }
+ if (maxIndex < indices[i])
+ {
+ maxIndex = indices[i];
+ }
+ }
+ }
+
+ return gl::IndexRange(static_cast<size_t>(minIndex), static_cast<size_t>(maxIndex),
+ nonPrimitiveRestartIndices);
+}
+
+} // anonymous namespace
+
+namespace gl
+{
+
+int VariableComponentCount(GLenum type)
+{
+ return VariableRowCount(type) * VariableColumnCount(type);
+}
+
+GLenum VariableComponentType(GLenum type)
+{
+ switch (type)
+ {
+ case GL_BOOL:
+ case GL_BOOL_VEC2:
+ case GL_BOOL_VEC3:
+ case GL_BOOL_VEC4:
+ return GL_BOOL;
+ case GL_FLOAT:
+ case GL_FLOAT_VEC2:
+ case GL_FLOAT_VEC3:
+ case GL_FLOAT_VEC4:
+ case GL_FLOAT_MAT2:
+ case GL_FLOAT_MAT3:
+ case GL_FLOAT_MAT4:
+ case GL_FLOAT_MAT2x3:
+ case GL_FLOAT_MAT3x2:
+ case GL_FLOAT_MAT2x4:
+ case GL_FLOAT_MAT4x2:
+ case GL_FLOAT_MAT3x4:
+ case GL_FLOAT_MAT4x3:
+ return GL_FLOAT;
+ case GL_INT:
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_2D_RECT_ANGLE:
+ case GL_SAMPLER_3D:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_SAMPLER_2D_ARRAY:
+ case GL_SAMPLER_EXTERNAL_OES:
+ case GL_SAMPLER_2D_MULTISAMPLE:
+ case GL_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_INT_SAMPLER_BUFFER:
+ case GL_INT_SAMPLER_2D:
+ case GL_INT_SAMPLER_3D:
+ case GL_INT_SAMPLER_CUBE:
+ case GL_INT_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_INT_SAMPLER_2D_ARRAY:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_2D:
+ case GL_UNSIGNED_INT_SAMPLER_3D:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_SAMPLER_2D_SHADOW:
+ case GL_SAMPLER_BUFFER:
+ case GL_SAMPLER_CUBE_SHADOW:
+ case GL_SAMPLER_2D_ARRAY_SHADOW:
+ case GL_INT_VEC2:
+ case GL_INT_VEC3:
+ case GL_INT_VEC4:
+ case GL_IMAGE_2D:
+ case GL_INT_IMAGE_2D:
+ case GL_UNSIGNED_INT_IMAGE_2D:
+ case GL_IMAGE_3D:
+ case GL_INT_IMAGE_3D:
+ case GL_UNSIGNED_INT_IMAGE_3D:
+ case GL_IMAGE_2D_ARRAY:
+ case GL_INT_IMAGE_2D_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+ case GL_IMAGE_CUBE:
+ case GL_INT_IMAGE_CUBE:
+ case GL_UNSIGNED_INT_IMAGE_CUBE:
+ case GL_IMAGE_CUBE_MAP_ARRAY:
+ case GL_INT_IMAGE_CUBE_MAP_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY:
+ case GL_IMAGE_BUFFER:
+ case GL_INT_IMAGE_BUFFER:
+ case GL_UNSIGNED_INT_SAMPLER_BUFFER:
+ case GL_UNSIGNED_INT_IMAGE_BUFFER:
+ case GL_UNSIGNED_INT_ATOMIC_COUNTER:
+ case GL_SAMPLER_VIDEO_IMAGE_WEBGL:
+ case GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT:
+ return GL_INT;
+ case GL_UNSIGNED_INT:
+ case GL_UNSIGNED_INT_VEC2:
+ case GL_UNSIGNED_INT_VEC3:
+ case GL_UNSIGNED_INT_VEC4:
+ return GL_UNSIGNED_INT;
+ default:
+ UNREACHABLE();
+ }
+
+ return GL_NONE;
+}
+
+size_t VariableComponentSize(GLenum type)
+{
+ switch (type)
+ {
+ case GL_BOOL:
+ return sizeof(GLint);
+ case GL_FLOAT:
+ return sizeof(GLfloat);
+ case GL_INT:
+ return sizeof(GLint);
+ case GL_UNSIGNED_INT:
+ return sizeof(GLuint);
+ default:
+ UNREACHABLE();
+ }
+
+ return 0;
+}
+
+size_t VariableInternalSize(GLenum type)
+{
+ // Expanded to 4-element vectors
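+    // e.g. GL_FLOAT_MAT3 has 3 rows, each padded to a vec4:
+    // VariableInternalSize(GL_FLOAT_MAT3) == sizeof(GLfloat) * 3 * 4 == 48 bytes.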
+ return VariableComponentSize(VariableComponentType(type)) * VariableRowCount(type) * 4;
+}
+
+size_t VariableExternalSize(GLenum type)
+{
+ return VariableComponentSize(VariableComponentType(type)) * VariableComponentCount(type);
+}
+
+std::string GetGLSLTypeString(GLenum type)
+{
+ switch (type)
+ {
+ case GL_BOOL:
+ return "bool";
+ case GL_INT:
+ return "int";
+ case GL_UNSIGNED_INT:
+ return "uint";
+ case GL_FLOAT:
+ return "float";
+ case GL_BOOL_VEC2:
+ return "bvec2";
+ case GL_BOOL_VEC3:
+ return "bvec3";
+ case GL_BOOL_VEC4:
+ return "bvec4";
+ case GL_INT_VEC2:
+ return "ivec2";
+ case GL_INT_VEC3:
+ return "ivec3";
+ case GL_INT_VEC4:
+ return "ivec4";
+ case GL_FLOAT_VEC2:
+ return "vec2";
+ case GL_FLOAT_VEC3:
+ return "vec3";
+ case GL_FLOAT_VEC4:
+ return "vec4";
+ case GL_UNSIGNED_INT_VEC2:
+ return "uvec2";
+ case GL_UNSIGNED_INT_VEC3:
+ return "uvec3";
+ case GL_UNSIGNED_INT_VEC4:
+ return "uvec4";
+ case GL_FLOAT_MAT2:
+ return "mat2";
+ case GL_FLOAT_MAT3:
+ return "mat3";
+ case GL_FLOAT_MAT4:
+ return "mat4";
+ default:
+ UNREACHABLE();
+ return "";
+ }
+}
+
+GLenum VariableBoolVectorType(GLenum type)
+{
+ switch (type)
+ {
+ case GL_FLOAT:
+ case GL_INT:
+ case GL_UNSIGNED_INT:
+ return GL_BOOL;
+ case GL_FLOAT_VEC2:
+ case GL_INT_VEC2:
+ case GL_UNSIGNED_INT_VEC2:
+ return GL_BOOL_VEC2;
+ case GL_FLOAT_VEC3:
+ case GL_INT_VEC3:
+ case GL_UNSIGNED_INT_VEC3:
+ return GL_BOOL_VEC3;
+ case GL_FLOAT_VEC4:
+ case GL_INT_VEC4:
+ case GL_UNSIGNED_INT_VEC4:
+ return GL_BOOL_VEC4;
+
+ default:
+ UNREACHABLE();
+ return GL_NONE;
+ }
+}
+
+int VariableRowCount(GLenum type)
+{
+ switch (type)
+ {
+ case GL_NONE:
+ return 0;
+ case GL_BOOL:
+ case GL_FLOAT:
+ case GL_INT:
+ case GL_UNSIGNED_INT:
+ case GL_BOOL_VEC2:
+ case GL_FLOAT_VEC2:
+ case GL_INT_VEC2:
+ case GL_UNSIGNED_INT_VEC2:
+ case GL_BOOL_VEC3:
+ case GL_FLOAT_VEC3:
+ case GL_INT_VEC3:
+ case GL_UNSIGNED_INT_VEC3:
+ case GL_BOOL_VEC4:
+ case GL_FLOAT_VEC4:
+ case GL_INT_VEC4:
+ case GL_UNSIGNED_INT_VEC4:
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_3D:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_2D_ARRAY:
+ case GL_SAMPLER_EXTERNAL_OES:
+ case GL_SAMPLER_2D_RECT_ANGLE:
+ case GL_SAMPLER_2D_MULTISAMPLE:
+ case GL_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_SAMPLER_BUFFER:
+ case GL_INT_SAMPLER_2D:
+ case GL_INT_SAMPLER_3D:
+ case GL_INT_SAMPLER_CUBE:
+ case GL_INT_SAMPLER_2D_ARRAY:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_INT_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_INT_SAMPLER_BUFFER:
+ case GL_UNSIGNED_INT_SAMPLER_2D:
+ case GL_UNSIGNED_INT_SAMPLER_3D:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE:
+ case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_BUFFER:
+ case GL_SAMPLER_2D_SHADOW:
+ case GL_SAMPLER_CUBE_SHADOW:
+ case GL_SAMPLER_2D_ARRAY_SHADOW:
+ case GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW:
+ case GL_IMAGE_2D:
+ case GL_INT_IMAGE_2D:
+ case GL_UNSIGNED_INT_IMAGE_2D:
+ case GL_IMAGE_2D_ARRAY:
+ case GL_INT_IMAGE_2D_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+ case GL_IMAGE_3D:
+ case GL_INT_IMAGE_3D:
+ case GL_UNSIGNED_INT_IMAGE_3D:
+ case GL_IMAGE_CUBE:
+ case GL_INT_IMAGE_CUBE:
+ case GL_UNSIGNED_INT_IMAGE_CUBE:
+ case GL_UNSIGNED_INT_ATOMIC_COUNTER:
+ case GL_IMAGE_CUBE_MAP_ARRAY:
+ case GL_INT_IMAGE_CUBE_MAP_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY:
+ case GL_IMAGE_BUFFER:
+ case GL_INT_IMAGE_BUFFER:
+ case GL_UNSIGNED_INT_IMAGE_BUFFER:
+ case GL_SAMPLER_VIDEO_IMAGE_WEBGL:
+ case GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT:
+ return 1;
+ case GL_FLOAT_MAT2:
+ case GL_FLOAT_MAT3x2:
+ case GL_FLOAT_MAT4x2:
+ return 2;
+ case GL_FLOAT_MAT3:
+ case GL_FLOAT_MAT2x3:
+ case GL_FLOAT_MAT4x3:
+ return 3;
+ case GL_FLOAT_MAT4:
+ case GL_FLOAT_MAT2x4:
+ case GL_FLOAT_MAT3x4:
+ return 4;
+ default:
+ UNREACHABLE();
+ }
+
+ return 0;
+}
+
+int VariableColumnCount(GLenum type)
+{
+ switch (type)
+ {
+ case GL_NONE:
+ return 0;
+ case GL_BOOL:
+ case GL_FLOAT:
+ case GL_INT:
+ case GL_UNSIGNED_INT:
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_3D:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_2D_ARRAY:
+ case GL_SAMPLER_2D_MULTISAMPLE:
+ case GL_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_SAMPLER_BUFFER:
+ case GL_INT_SAMPLER_2D:
+ case GL_INT_SAMPLER_3D:
+ case GL_INT_SAMPLER_CUBE:
+ case GL_INT_SAMPLER_2D_ARRAY:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_INT_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_INT_SAMPLER_BUFFER:
+ case GL_SAMPLER_EXTERNAL_OES:
+ case GL_SAMPLER_2D_RECT_ANGLE:
+ case GL_UNSIGNED_INT_SAMPLER_2D:
+ case GL_UNSIGNED_INT_SAMPLER_3D:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE:
+ case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_BUFFER:
+ case GL_SAMPLER_2D_SHADOW:
+ case GL_SAMPLER_CUBE_SHADOW:
+ case GL_SAMPLER_2D_ARRAY_SHADOW:
+ case GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW:
+ case GL_IMAGE_2D:
+ case GL_INT_IMAGE_2D:
+ case GL_UNSIGNED_INT_IMAGE_2D:
+ case GL_IMAGE_3D:
+ case GL_INT_IMAGE_3D:
+ case GL_UNSIGNED_INT_IMAGE_3D:
+ case GL_IMAGE_2D_ARRAY:
+ case GL_INT_IMAGE_2D_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+ case GL_IMAGE_CUBE_MAP_ARRAY:
+ case GL_INT_IMAGE_CUBE_MAP_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY:
+ case GL_IMAGE_BUFFER:
+ case GL_INT_IMAGE_BUFFER:
+ case GL_UNSIGNED_INT_IMAGE_BUFFER:
+ case GL_IMAGE_CUBE:
+ case GL_INT_IMAGE_CUBE:
+ case GL_UNSIGNED_INT_IMAGE_CUBE:
+ case GL_UNSIGNED_INT_ATOMIC_COUNTER:
+ case GL_SAMPLER_VIDEO_IMAGE_WEBGL:
+ case GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT:
+ return 1;
+ case GL_BOOL_VEC2:
+ case GL_FLOAT_VEC2:
+ case GL_INT_VEC2:
+ case GL_UNSIGNED_INT_VEC2:
+ case GL_FLOAT_MAT2:
+ case GL_FLOAT_MAT2x3:
+ case GL_FLOAT_MAT2x4:
+ return 2;
+ case GL_BOOL_VEC3:
+ case GL_FLOAT_VEC3:
+ case GL_INT_VEC3:
+ case GL_UNSIGNED_INT_VEC3:
+ case GL_FLOAT_MAT3:
+ case GL_FLOAT_MAT3x2:
+ case GL_FLOAT_MAT3x4:
+ return 3;
+ case GL_BOOL_VEC4:
+ case GL_FLOAT_VEC4:
+ case GL_INT_VEC4:
+ case GL_UNSIGNED_INT_VEC4:
+ case GL_FLOAT_MAT4:
+ case GL_FLOAT_MAT4x2:
+ case GL_FLOAT_MAT4x3:
+ return 4;
+ default:
+ UNREACHABLE();
+ }
+
+ return 0;
+}
+
+bool IsSamplerType(GLenum type)
+{
+ switch (type)
+ {
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_3D:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_2D_ARRAY:
+ case GL_SAMPLER_EXTERNAL_OES:
+ case GL_SAMPLER_2D_MULTISAMPLE:
+ case GL_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_SAMPLER_BUFFER:
+ case GL_SAMPLER_2D_RECT_ANGLE:
+ case GL_INT_SAMPLER_2D:
+ case GL_INT_SAMPLER_3D:
+ case GL_INT_SAMPLER_CUBE:
+ case GL_INT_SAMPLER_2D_ARRAY:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_INT_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_INT_SAMPLER_BUFFER:
+ case GL_UNSIGNED_INT_SAMPLER_2D:
+ case GL_UNSIGNED_INT_SAMPLER_3D:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE:
+ case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_BUFFER:
+ case GL_SAMPLER_2D_SHADOW:
+ case GL_SAMPLER_CUBE_SHADOW:
+ case GL_SAMPLER_2D_ARRAY_SHADOW:
+ case GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW:
+ case GL_SAMPLER_VIDEO_IMAGE_WEBGL:
+ case GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT:
+ return true;
+ }
+
+ return false;
+}
+
+bool IsSamplerCubeType(GLenum type)
+{
+ switch (type)
+ {
+ case GL_SAMPLER_CUBE:
+ case GL_INT_SAMPLER_CUBE:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE:
+ case GL_SAMPLER_CUBE_SHADOW:
+ return true;
+ }
+
+ return false;
+}
+
+bool IsSamplerYUVType(GLenum type)
+{
+ switch (type)
+ {
+ case GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool IsImageType(GLenum type)
+{
+ switch (type)
+ {
+ case GL_IMAGE_2D:
+ case GL_INT_IMAGE_2D:
+ case GL_UNSIGNED_INT_IMAGE_2D:
+ case GL_IMAGE_3D:
+ case GL_INT_IMAGE_3D:
+ case GL_UNSIGNED_INT_IMAGE_3D:
+ case GL_IMAGE_2D_ARRAY:
+ case GL_INT_IMAGE_2D_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+ case GL_IMAGE_CUBE_MAP_ARRAY:
+ case GL_INT_IMAGE_CUBE_MAP_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY:
+ case GL_IMAGE_BUFFER:
+ case GL_INT_IMAGE_BUFFER:
+ case GL_UNSIGNED_INT_IMAGE_BUFFER:
+ case GL_IMAGE_CUBE:
+ case GL_INT_IMAGE_CUBE:
+ case GL_UNSIGNED_INT_IMAGE_CUBE:
+ return true;
+ }
+ return false;
+}
+
+bool IsImage2DType(GLenum type)
+{
+ switch (type)
+ {
+ case GL_IMAGE_2D:
+ case GL_INT_IMAGE_2D:
+ case GL_UNSIGNED_INT_IMAGE_2D:
+ return true;
+ case GL_IMAGE_3D:
+ case GL_INT_IMAGE_3D:
+ case GL_UNSIGNED_INT_IMAGE_3D:
+ case GL_IMAGE_2D_ARRAY:
+ case GL_INT_IMAGE_2D_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+ case GL_IMAGE_CUBE_MAP_ARRAY:
+ case GL_INT_IMAGE_CUBE_MAP_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY:
+ case GL_IMAGE_CUBE:
+ case GL_INT_IMAGE_CUBE:
+ case GL_UNSIGNED_INT_IMAGE_CUBE:
+ case GL_IMAGE_BUFFER:
+ case GL_INT_IMAGE_BUFFER:
+ case GL_UNSIGNED_INT_IMAGE_BUFFER:
+ return false;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+}
+
+bool IsAtomicCounterType(GLenum type)
+{
+ return type == GL_UNSIGNED_INT_ATOMIC_COUNTER;
+}
+
+bool IsOpaqueType(GLenum type)
+{
+ // ESSL 3.10 section 4.1.7 defines opaque types as: samplers, images and atomic counters.
+ return IsImageType(type) || IsSamplerType(type) || IsAtomicCounterType(type);
+}
+
+bool IsMatrixType(GLenum type)
+{
+ return VariableRowCount(type) > 1;
+}
+
+GLenum TransposeMatrixType(GLenum type)
+{
+ if (!IsMatrixType(type))
+ {
+ return type;
+ }
+
+ switch (type)
+ {
+ case GL_FLOAT_MAT2:
+ return GL_FLOAT_MAT2;
+ case GL_FLOAT_MAT3:
+ return GL_FLOAT_MAT3;
+ case GL_FLOAT_MAT4:
+ return GL_FLOAT_MAT4;
+ case GL_FLOAT_MAT2x3:
+ return GL_FLOAT_MAT3x2;
+ case GL_FLOAT_MAT3x2:
+ return GL_FLOAT_MAT2x3;
+ case GL_FLOAT_MAT2x4:
+ return GL_FLOAT_MAT4x2;
+ case GL_FLOAT_MAT4x2:
+ return GL_FLOAT_MAT2x4;
+ case GL_FLOAT_MAT3x4:
+ return GL_FLOAT_MAT4x3;
+ case GL_FLOAT_MAT4x3:
+ return GL_FLOAT_MAT3x4;
+ default:
+ UNREACHABLE();
+ return GL_NONE;
+ }
+}
+
+int MatrixRegisterCount(GLenum type, bool isRowMajorMatrix)
+{
+ ASSERT(IsMatrixType(type));
+ return isRowMajorMatrix ? VariableRowCount(type) : VariableColumnCount(type);
+}
+
+int MatrixComponentCount(GLenum type, bool isRowMajorMatrix)
+{
+ ASSERT(IsMatrixType(type));
+ return isRowMajorMatrix ? VariableColumnCount(type) : VariableRowCount(type);
+}
+
+int VariableRegisterCount(GLenum type)
+{
+ return IsMatrixType(type) ? VariableColumnCount(type) : 1;
+}
+
+int AllocateFirstFreeBits(unsigned int *bits, unsigned int allocationSize, unsigned int bitsSize)
+{
+ ASSERT(allocationSize <= bitsSize);
+
+ unsigned int mask = std::numeric_limits<unsigned int>::max() >>
+ (std::numeric_limits<unsigned int>::digits - allocationSize);
+
+ for (unsigned int i = 0; i < bitsSize - allocationSize + 1; i++)
+ {
+ if ((*bits & mask) == 0)
+ {
+ *bits |= mask;
+ return i;
+ }
+
+ mask <<= 1;
+ }
+
+ return -1;
+}
+
+IndexRange ComputeIndexRange(DrawElementsType indexType,
+ const GLvoid *indices,
+ size_t count,
+ bool primitiveRestartEnabled)
+{
+ switch (indexType)
+ {
+ case DrawElementsType::UnsignedByte:
+ return ComputeTypedIndexRange(static_cast<const GLubyte *>(indices), count,
+ primitiveRestartEnabled,
+ GetPrimitiveRestartIndex(indexType));
+ case DrawElementsType::UnsignedShort:
+ return ComputeTypedIndexRange(static_cast<const GLushort *>(indices), count,
+ primitiveRestartEnabled,
+ GetPrimitiveRestartIndex(indexType));
+ case DrawElementsType::UnsignedInt:
+ return ComputeTypedIndexRange(static_cast<const GLuint *>(indices), count,
+ primitiveRestartEnabled,
+ GetPrimitiveRestartIndex(indexType));
+ default:
+ UNREACHABLE();
+ return IndexRange();
+ }
+}
+
+GLuint GetPrimitiveRestartIndex(DrawElementsType indexType)
+{
+ switch (indexType)
+ {
+ case DrawElementsType::UnsignedByte:
+ return 0xFF;
+ case DrawElementsType::UnsignedShort:
+ return 0xFFFF;
+ case DrawElementsType::UnsignedInt:
+ return 0xFFFFFFFF;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+bool IsTriangleMode(PrimitiveMode drawMode)
+{
+ switch (drawMode)
+ {
+ case PrimitiveMode::Triangles:
+ case PrimitiveMode::TriangleFan:
+ case PrimitiveMode::TriangleStrip:
+ return true;
+ case PrimitiveMode::Points:
+ case PrimitiveMode::Lines:
+ case PrimitiveMode::LineLoop:
+ case PrimitiveMode::LineStrip:
+ return false;
+ default:
+ UNREACHABLE();
+ }
+
+ return false;
+}
+
+bool IsPolygonMode(PrimitiveMode mode)
+{
+ switch (mode)
+ {
+ case PrimitiveMode::Points:
+ case PrimitiveMode::Lines:
+ case PrimitiveMode::LineStrip:
+ case PrimitiveMode::LineLoop:
+ case PrimitiveMode::LinesAdjacency:
+ case PrimitiveMode::LineStripAdjacency:
+ return false;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+namespace priv
+{
+const angle::PackedEnumMap<PrimitiveMode, bool> gLineModes = {
+ {{PrimitiveMode::LineLoop, true},
+ {PrimitiveMode::LineStrip, true},
+ {PrimitiveMode::LineStripAdjacency, true},
+ {PrimitiveMode::Lines, true}}};
+} // namespace priv
+
+bool IsIntegerFormat(GLenum unsizedFormat)
+{
+ switch (unsizedFormat)
+ {
+ case GL_RGBA_INTEGER:
+ case GL_RGB_INTEGER:
+ case GL_RG_INTEGER:
+ case GL_RED_INTEGER:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+// [OpenGL ES SL 3.00.4] Section 11 p. 120
+// Vertex Outs/Fragment Ins packing priorities
+int VariableSortOrder(GLenum type)
+{
+ switch (type)
+ {
+ // 1. Arrays of mat4 and mat4
+ // Non-square matrices of type matCxR consume the same space as a square
+ // matrix of type matN where N is the greater of C and R
+ case GL_FLOAT_MAT4:
+ case GL_FLOAT_MAT2x4:
+ case GL_FLOAT_MAT3x4:
+ case GL_FLOAT_MAT4x2:
+ case GL_FLOAT_MAT4x3:
+ return 0;
+
+ // 2. Arrays of mat2 and mat2 (since they occupy full rows)
+ case GL_FLOAT_MAT2:
+ return 1;
+
+ // 3. Arrays of vec4 and vec4
+ case GL_FLOAT_VEC4:
+ case GL_INT_VEC4:
+ case GL_BOOL_VEC4:
+ case GL_UNSIGNED_INT_VEC4:
+ return 2;
+
+ // 4. Arrays of mat3 and mat3
+ case GL_FLOAT_MAT3:
+ case GL_FLOAT_MAT2x3:
+ case GL_FLOAT_MAT3x2:
+ return 3;
+
+ // 5. Arrays of vec3 and vec3
+ case GL_FLOAT_VEC3:
+ case GL_INT_VEC3:
+ case GL_BOOL_VEC3:
+ case GL_UNSIGNED_INT_VEC3:
+ return 4;
+
+ // 6. Arrays of vec2 and vec2
+ case GL_FLOAT_VEC2:
+ case GL_INT_VEC2:
+ case GL_BOOL_VEC2:
+ case GL_UNSIGNED_INT_VEC2:
+ return 5;
+
+ // 7. Single component types
+ case GL_FLOAT:
+ case GL_INT:
+ case GL_BOOL:
+ case GL_UNSIGNED_INT:
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_EXTERNAL_OES:
+ case GL_SAMPLER_2D_RECT_ANGLE:
+ case GL_SAMPLER_2D_ARRAY:
+ case GL_SAMPLER_2D_MULTISAMPLE:
+ case GL_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_SAMPLER_3D:
+ case GL_INT_SAMPLER_2D:
+ case GL_INT_SAMPLER_3D:
+ case GL_INT_SAMPLER_CUBE:
+ case GL_INT_SAMPLER_2D_ARRAY:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE:
+ case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_2D:
+ case GL_UNSIGNED_INT_SAMPLER_3D:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE:
+ case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+ case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
+ case GL_SAMPLER_2D_SHADOW:
+ case GL_SAMPLER_2D_ARRAY_SHADOW:
+ case GL_SAMPLER_CUBE_SHADOW:
+ case GL_IMAGE_2D:
+ case GL_INT_IMAGE_2D:
+ case GL_UNSIGNED_INT_IMAGE_2D:
+ case GL_IMAGE_3D:
+ case GL_INT_IMAGE_3D:
+ case GL_UNSIGNED_INT_IMAGE_3D:
+ case GL_IMAGE_2D_ARRAY:
+ case GL_INT_IMAGE_2D_ARRAY:
+ case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+ case GL_IMAGE_CUBE:
+ case GL_INT_IMAGE_CUBE:
+ case GL_UNSIGNED_INT_IMAGE_CUBE:
+ case GL_UNSIGNED_INT_ATOMIC_COUNTER:
+ case GL_SAMPLER_VIDEO_IMAGE_WEBGL:
+ case GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT:
+ return 6;
+
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+std::string ParseResourceName(const std::string &name, std::vector<unsigned int> *outSubscripts)
+{
+ if (outSubscripts)
+ {
+ outSubscripts->clear();
+ }
+ // Strip any trailing array indexing operators and retrieve the subscripts.
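+    // e.g. for "buf[2][1]" this returns "buf" with subscripts {1, 2} (rightmost index first).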
+ size_t baseNameLength = name.length();
+ bool hasIndex = true;
+ while (hasIndex)
+ {
+ size_t open = name.find_last_of('[', baseNameLength - 1);
+ size_t close = name.find_last_of(']', baseNameLength - 1);
+ hasIndex = (open != std::string::npos) && (close == baseNameLength - 1);
+ if (hasIndex)
+ {
+ baseNameLength = open;
+ if (outSubscripts)
+ {
+ int index = atoi(name.substr(open + 1).c_str());
+ if (index >= 0)
+ {
+ outSubscripts->push_back(index);
+ }
+ else
+ {
+ outSubscripts->push_back(GL_INVALID_INDEX);
+ }
+ }
+ }
+ }
+
+ return name.substr(0, baseNameLength);
+}
+
+bool IsBuiltInName(const char *name)
+{
+ return angle::BeginsWith(name, "gl_");
+}
+
+std::string StripLastArrayIndex(const std::string &name)
+{
+ size_t strippedNameLength = name.find_last_of('[');
+ if (strippedNameLength != std::string::npos && name.back() == ']')
+ {
+ return name.substr(0, strippedNameLength);
+ }
+ return name;
+}
+
+bool SamplerNameContainsNonZeroArrayElement(const std::string &name)
+{
+ constexpr char kZERO_ELEMENT[] = "[0]";
+
+ size_t start = 0;
+ while (true)
+ {
+ start = name.find(kZERO_ELEMENT[0], start);
+ if (start == std::string::npos)
+ {
+ break;
+ }
+ if (name.compare(start, strlen(kZERO_ELEMENT), kZERO_ELEMENT) != 0)
+ {
+ return true;
+ }
+ start++;
+ }
+ return false;
+}
+
+unsigned int ArraySizeProduct(const std::vector<unsigned int> &arraySizes)
+{
+ unsigned int arraySizeProduct = 1u;
+ for (unsigned int arraySize : arraySizes)
+ {
+ arraySizeProduct *= arraySize;
+ }
+ return arraySizeProduct;
+}
+
+unsigned int InnerArraySizeProduct(const std::vector<unsigned int> &arraySizes)
+{
+ unsigned int arraySizeProduct = 1u;
+ for (size_t index = 0; index + 1 < arraySizes.size(); ++index)
+ {
+ arraySizeProduct *= arraySizes[index];
+ }
+ return arraySizeProduct;
+}
+
+unsigned int OutermostArraySize(const std::vector<unsigned int> &arraySizes)
+{
+ return arraySizes.empty() || arraySizes.back() == 0 ? 1 : arraySizes.back();
+}
+
+unsigned int ParseArrayIndex(const std::string &name, size_t *nameLengthWithoutArrayIndexOut)
+{
+ ASSERT(nameLengthWithoutArrayIndexOut != nullptr);
+
+ // Strip any trailing array operator and retrieve the subscript
+ size_t open = name.find_last_of('[');
+ if (open != std::string::npos && name.back() == ']')
+ {
+ bool indexIsValidDecimalNumber = true;
+ for (size_t i = open + 1; i < name.length() - 1u; ++i)
+ {
+ if (!isdigit(name[i]))
+ {
+ indexIsValidDecimalNumber = false;
+ break;
+ }
+
+ // Leading zeroes are invalid
+ if ((i == (open + 1)) && (name[i] == '0') && (name[i + 1] != ']'))
+ {
+ indexIsValidDecimalNumber = false;
+ break;
+ }
+ }
+ if (indexIsValidDecimalNumber)
+ {
+ errno = 0; // reset global error flag.
+ unsigned long subscript =
+ strtoul(name.c_str() + open + 1, /*endptr*/ nullptr, /*radix*/ 10);
+
+ // Check if resulting integer is out-of-range or conversion error.
+ if (angle::base::IsValueInRangeForNumericType<uint32_t>(subscript) &&
+ !(subscript == ULONG_MAX && errno == ERANGE) && !(errno != 0 && subscript == 0))
+ {
+ *nameLengthWithoutArrayIndexOut = open;
+ return static_cast<unsigned int>(subscript);
+ }
+ }
+ }
+
+ *nameLengthWithoutArrayIndexOut = name.length();
+ return GL_INVALID_INDEX;
+}
+
+const char *GetGenericErrorMessage(GLenum error)
+{
+ switch (error)
+ {
+ case GL_NO_ERROR:
+ return "";
+ case GL_INVALID_ENUM:
+ return "Invalid enum.";
+ case GL_INVALID_VALUE:
+ return "Invalid value.";
+ case GL_INVALID_OPERATION:
+ return "Invalid operation.";
+ case GL_STACK_OVERFLOW:
+ return "Stack overflow.";
+ case GL_STACK_UNDERFLOW:
+ return "Stack underflow.";
+ case GL_OUT_OF_MEMORY:
+ return "Out of memory.";
+ case GL_INVALID_FRAMEBUFFER_OPERATION:
+ return "Invalid framebuffer operation.";
+ default:
+ UNREACHABLE();
+ return "Unknown error.";
+ }
+}
+
+unsigned int ElementTypeSize(GLenum elementType)
+{
+ switch (elementType)
+ {
+ case GL_UNSIGNED_BYTE:
+ return sizeof(GLubyte);
+ case GL_UNSIGNED_SHORT:
+ return sizeof(GLushort);
+ case GL_UNSIGNED_INT:
+ return sizeof(GLuint);
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+bool IsMipmapFiltered(GLenum minFilterMode)
+{
+ switch (minFilterMode)
+ {
+ case GL_NEAREST:
+ case GL_LINEAR:
+ return false;
+ case GL_NEAREST_MIPMAP_NEAREST:
+ case GL_LINEAR_MIPMAP_NEAREST:
+ case GL_NEAREST_MIPMAP_LINEAR:
+ case GL_LINEAR_MIPMAP_LINEAR:
+ return true;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+}
+
+PipelineType GetPipelineType(ShaderType type)
+{
+ switch (type)
+ {
+ case ShaderType::Vertex:
+ case ShaderType::Fragment:
+ case ShaderType::Geometry:
+ return PipelineType::GraphicsPipeline;
+ case ShaderType::Compute:
+ return PipelineType::ComputePipeline;
+ default:
+ UNREACHABLE();
+ return PipelineType::GraphicsPipeline;
+ }
+}
+
+const char *GetDebugMessageSourceString(GLenum source)
+{
+ switch (source)
+ {
+ case GL_DEBUG_SOURCE_API:
+ return "API";
+ case GL_DEBUG_SOURCE_WINDOW_SYSTEM:
+ return "Window System";
+ case GL_DEBUG_SOURCE_SHADER_COMPILER:
+ return "Shader Compiler";
+ case GL_DEBUG_SOURCE_THIRD_PARTY:
+ return "Third Party";
+ case GL_DEBUG_SOURCE_APPLICATION:
+ return "Application";
+ case GL_DEBUG_SOURCE_OTHER:
+ return "Other";
+ default:
+ return "Unknown Source";
+ }
+}
+
+const char *GetDebugMessageTypeString(GLenum type)
+{
+ switch (type)
+ {
+ case GL_DEBUG_TYPE_ERROR:
+ return "Error";
+ case GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR:
+ return "Deprecated behavior";
+ case GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR:
+ return "Undefined behavior";
+ case GL_DEBUG_TYPE_PORTABILITY:
+ return "Portability";
+ case GL_DEBUG_TYPE_PERFORMANCE:
+ return "Performance";
+ case GL_DEBUG_TYPE_OTHER:
+ return "Other";
+ case GL_DEBUG_TYPE_MARKER:
+ return "Marker";
+ default:
+ return "Unknown Type";
+ }
+}
+
+const char *GetDebugMessageSeverityString(GLenum severity)
+{
+ switch (severity)
+ {
+ case GL_DEBUG_SEVERITY_HIGH:
+ return "High";
+ case GL_DEBUG_SEVERITY_MEDIUM:
+ return "Medium";
+ case GL_DEBUG_SEVERITY_LOW:
+ return "Low";
+ case GL_DEBUG_SEVERITY_NOTIFICATION:
+ return "Notification";
+ default:
+ return "Unknown Severity";
+ }
+}
+
+ShaderType GetShaderTypeFromBitfield(size_t singleShaderType)
+{
+ switch (singleShaderType)
+ {
+ case GL_VERTEX_SHADER_BIT:
+ return ShaderType::Vertex;
+ case GL_FRAGMENT_SHADER_BIT:
+ return ShaderType::Fragment;
+ case GL_COMPUTE_SHADER_BIT:
+ return ShaderType::Compute;
+ case GL_GEOMETRY_SHADER_BIT:
+ return ShaderType::Geometry;
+ case GL_TESS_CONTROL_SHADER_BIT:
+ return ShaderType::TessControl;
+ case GL_TESS_EVALUATION_SHADER_BIT:
+ return ShaderType::TessEvaluation;
+ default:
+ return ShaderType::InvalidEnum;
+ }
+}
+
+GLbitfield GetBitfieldFromShaderType(ShaderType shaderType)
+{
+ switch (shaderType)
+ {
+ case ShaderType::Vertex:
+ return GL_VERTEX_SHADER_BIT;
+ case ShaderType::Fragment:
+ return GL_FRAGMENT_SHADER_BIT;
+ case ShaderType::Compute:
+ return GL_COMPUTE_SHADER_BIT;
+ case ShaderType::Geometry:
+ return GL_GEOMETRY_SHADER_BIT;
+ case ShaderType::TessControl:
+ return GL_TESS_CONTROL_SHADER_BIT;
+ case ShaderType::TessEvaluation:
+ return GL_TESS_EVALUATION_SHADER_BIT;
+ default:
+ UNREACHABLE();
+ return GL_ZERO;
+ }
+}
+
+bool ShaderTypeSupportsTransformFeedback(ShaderType shaderType)
+{
+ switch (shaderType)
+ {
+ case ShaderType::Vertex:
+ case ShaderType::Geometry:
+ case ShaderType::TessEvaluation:
+ return true;
+ default:
+ return false;
+ }
+}
+
+ShaderType GetLastPreFragmentStage(ShaderBitSet shaderTypes)
+{
+ shaderTypes.reset(ShaderType::Fragment);
+ shaderTypes.reset(ShaderType::Compute);
+ return shaderTypes.any() ? shaderTypes.last() : ShaderType::InvalidEnum;
+}
+} // namespace gl
+
+namespace egl
+{
+static_assert(EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X_KHR - EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR == 1,
+ "Unexpected EGL cube map enum value.");
+static_assert(EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y_KHR - EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR == 2,
+ "Unexpected EGL cube map enum value.");
+static_assert(EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_KHR - EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR == 3,
+ "Unexpected EGL cube map enum value.");
+static_assert(EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z_KHR - EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR == 4,
+ "Unexpected EGL cube map enum value.");
+static_assert(EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_KHR - EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR == 5,
+ "Unexpected EGL cube map enum value.");
+
+bool IsCubeMapTextureTarget(EGLenum target)
+{
+ return (target >= FirstCubeMapTextureTarget && target <= LastCubeMapTextureTarget);
+}
+
+size_t CubeMapTextureTargetToLayerIndex(EGLenum target)
+{
+ ASSERT(IsCubeMapTextureTarget(target));
+ return target - static_cast<size_t>(FirstCubeMapTextureTarget);
+}
+
+EGLenum LayerIndexToCubeMapTextureTarget(size_t index)
+{
+ ASSERT(index <= (LastCubeMapTextureTarget - FirstCubeMapTextureTarget));
+ return FirstCubeMapTextureTarget + static_cast<GLenum>(index);
+}
+
+bool IsTextureTarget(EGLenum target)
+{
+ switch (target)
+ {
+ case EGL_GL_TEXTURE_2D_KHR:
+ case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR:
+ case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X_KHR:
+ case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y_KHR:
+ case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_KHR:
+ case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z_KHR:
+ case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_KHR:
+ case EGL_GL_TEXTURE_3D_KHR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool IsRenderbufferTarget(EGLenum target)
+{
+ return target == EGL_GL_RENDERBUFFER_KHR;
+}
+
+bool IsExternalImageTarget(EGLenum target)
+{
+ switch (target)
+ {
+ case EGL_NATIVE_BUFFER_ANDROID:
+ case EGL_D3D11_TEXTURE_ANGLE:
+ case EGL_LINUX_DMA_BUF_EXT:
+ case EGL_METAL_TEXTURE_ANGLE:
+ case EGL_VULKAN_IMAGE_ANGLE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+const char *GetGenericErrorMessage(EGLint error)
+{
+ switch (error)
+ {
+ case EGL_SUCCESS:
+ return "";
+ case EGL_NOT_INITIALIZED:
+ return "Not initialized.";
+ case EGL_BAD_ACCESS:
+ return "Bad access.";
+ case EGL_BAD_ALLOC:
+ return "Bad allocation.";
+ case EGL_BAD_ATTRIBUTE:
+ return "Bad attribute.";
+ case EGL_BAD_CONFIG:
+ return "Bad config.";
+ case EGL_BAD_CONTEXT:
+ return "Bad context.";
+ case EGL_BAD_CURRENT_SURFACE:
+ return "Bad current surface.";
+ case EGL_BAD_DISPLAY:
+ return "Bad display.";
+ case EGL_BAD_MATCH:
+ return "Bad match.";
+ case EGL_BAD_NATIVE_WINDOW:
+ return "Bad native window.";
+ case EGL_BAD_NATIVE_PIXMAP:
+ return "Bad native pixmap.";
+ case EGL_BAD_PARAMETER:
+ return "Bad parameter.";
+ case EGL_BAD_SURFACE:
+ return "Bad surface.";
+ case EGL_CONTEXT_LOST:
+ return "Context lost.";
+ case EGL_BAD_STREAM_KHR:
+ return "Bad stream.";
+ case EGL_BAD_STATE_KHR:
+ return "Bad state.";
+ case EGL_BAD_DEVICE_EXT:
+ return "Bad device.";
+ default:
+ UNREACHABLE();
+ return "Unknown error.";
+ }
+}
+
+} // namespace egl
+
+namespace egl_gl
+{
+GLuint EGLClientBufferToGLObjectHandle(EGLClientBuffer buffer)
+{
+ return static_cast<GLuint>(reinterpret_cast<uintptr_t>(buffer));
+}
+} // namespace egl_gl
+
+namespace gl_egl
+{
+EGLenum GLComponentTypeToEGLColorComponentType(GLenum glComponentType)
+{
+ switch (glComponentType)
+ {
+ case GL_FLOAT:
+ return EGL_COLOR_COMPONENT_TYPE_FLOAT_EXT;
+
+ case GL_UNSIGNED_NORMALIZED:
+ return EGL_COLOR_COMPONENT_TYPE_FIXED_EXT;
+
+ default:
+ UNREACHABLE();
+ return EGL_NONE;
+ }
+}
+
+EGLClientBuffer GLObjectHandleToEGLClientBuffer(GLuint handle)
+{
+ return reinterpret_cast<EGLClientBuffer>(static_cast<uintptr_t>(handle));
+}
+
+} // namespace gl_egl
+
+namespace angle
+{
+bool IsDrawEntryPoint(EntryPoint entryPoint)
+{
+ switch (entryPoint)
+ {
+ case EntryPoint::GLDrawArrays:
+ case EntryPoint::GLDrawArraysIndirect:
+ case EntryPoint::GLDrawArraysInstanced:
+ case EntryPoint::GLDrawArraysInstancedANGLE:
+ case EntryPoint::GLDrawArraysInstancedBaseInstance:
+ case EntryPoint::GLDrawArraysInstancedBaseInstanceANGLE:
+ case EntryPoint::GLDrawArraysInstancedEXT:
+ case EntryPoint::GLDrawElements:
+ case EntryPoint::GLDrawElementsBaseVertex:
+ case EntryPoint::GLDrawElementsBaseVertexEXT:
+ case EntryPoint::GLDrawElementsBaseVertexOES:
+ case EntryPoint::GLDrawElementsIndirect:
+ case EntryPoint::GLDrawElementsInstanced:
+ case EntryPoint::GLDrawElementsInstancedANGLE:
+ case EntryPoint::GLDrawElementsInstancedBaseInstance:
+ case EntryPoint::GLDrawElementsInstancedBaseVertex:
+ case EntryPoint::GLDrawElementsInstancedBaseVertexBaseInstance:
+ case EntryPoint::GLDrawElementsInstancedBaseVertexBaseInstanceANGLE:
+ case EntryPoint::GLDrawElementsInstancedBaseVertexEXT:
+ case EntryPoint::GLDrawElementsInstancedBaseVertexOES:
+ case EntryPoint::GLDrawElementsInstancedEXT:
+ case EntryPoint::GLDrawPixels:
+ case EntryPoint::GLDrawRangeElements:
+ case EntryPoint::GLDrawRangeElementsBaseVertex:
+ case EntryPoint::GLDrawRangeElementsBaseVertexEXT:
+ case EntryPoint::GLDrawRangeElementsBaseVertexOES:
+ case EntryPoint::GLDrawTexfOES:
+ case EntryPoint::GLDrawTexfvOES:
+ case EntryPoint::GLDrawTexiOES:
+ case EntryPoint::GLDrawTexivOES:
+ case EntryPoint::GLDrawTexsOES:
+ case EntryPoint::GLDrawTexsvOES:
+ case EntryPoint::GLDrawTexxOES:
+ case EntryPoint::GLDrawTexxvOES:
+ case EntryPoint::GLDrawTransformFeedback:
+ case EntryPoint::GLDrawTransformFeedbackInstanced:
+ case EntryPoint::GLDrawTransformFeedbackStream:
+ case EntryPoint::GLDrawTransformFeedbackStreamInstanced:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool IsDispatchEntryPoint(EntryPoint entryPoint)
+{
+ switch (entryPoint)
+ {
+ case EntryPoint::GLDispatchCompute:
+ case EntryPoint::GLDispatchComputeIndirect:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool IsClearEntryPoint(EntryPoint entryPoint)
+{
+ switch (entryPoint)
+ {
+ case EntryPoint::GLClear:
+ case EntryPoint::GLClearBufferfi:
+ case EntryPoint::GLClearBufferfv:
+ case EntryPoint::GLClearBufferiv:
+ case EntryPoint::GLClearBufferuiv:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool IsQueryEntryPoint(EntryPoint entryPoint)
+{
+ switch (entryPoint)
+ {
+ case EntryPoint::GLBeginQuery:
+ case EntryPoint::GLBeginQueryEXT:
+ case EntryPoint::GLBeginQueryIndexed:
+ case EntryPoint::GLEndQuery:
+ case EntryPoint::GLEndQueryEXT:
+ case EntryPoint::GLEndQueryIndexed:
+ return true;
+ default:
+ return false;
+ }
+}
+} // namespace angle
+
+#if !defined(ANGLE_ENABLE_WINDOWS_UWP)
+void writeFile(const char *path, const void *content, size_t size)
+{
+ FILE *file = fopen(path, "w");
+ if (!file)
+ {
+ UNREACHABLE();
+ return;
+ }
+
+ fwrite(content, sizeof(char), size, file);
+ fclose(file);
+}
+#endif // !ANGLE_ENABLE_WINDOWS_UWP
+
+#if defined(ANGLE_PLATFORM_WINDOWS)
+
+// Causes the thread to relinquish the remainder of its time slice to any
+// other thread that is ready to run. If there are no other threads ready
+// to run, the function returns immediately, and the thread continues execution.
+void ScheduleYield()
+{
+ Sleep(0);
+}
+
+#endif
diff --git a/gfx/angle/checkout/src/common/utilities.h b/gfx/angle/checkout/src/common/utilities.h
new file mode 100644
index 0000000000..cae620d03b
--- /dev/null
+++ b/gfx/angle/checkout/src/common/utilities.h
@@ -0,0 +1,336 @@
+//
+// Copyright 2002 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// utilities.h: Conversion functions and other utility routines.
+
+#ifndef COMMON_UTILITIES_H_
+#define COMMON_UTILITIES_H_
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLSLANG/ShaderLang.h>
+
+#include <math.h>
+#include <string>
+#include <vector>
+
+#include "angle_gl.h"
+
+#include "common/PackedEnums.h"
+#include "common/mathutil.h"
+#include "common/platform.h"
+
+namespace sh
+{
+struct ShaderVariable;
+}
+
+constexpr bool ShPixelLocalStorageTypeUsesImages(ShPixelLocalStorageType type)
+{
+ return type == ShPixelLocalStorageType::ImageStoreR32PackedFormats ||
+ type == ShPixelLocalStorageType::ImageStoreNativeFormats;
+}
+
+namespace gl
+{
+
+int VariableComponentCount(GLenum type);
+GLenum VariableComponentType(GLenum type);
+size_t VariableComponentSize(GLenum type);
+size_t VariableInternalSize(GLenum type);
+size_t VariableExternalSize(GLenum type);
+int VariableRowCount(GLenum type);
+int VariableColumnCount(GLenum type);
+bool IsSamplerType(GLenum type);
+bool IsSamplerCubeType(GLenum type);
+bool IsSamplerYUVType(GLenum type);
+bool IsImageType(GLenum type);
+bool IsImage2DType(GLenum type);
+bool IsAtomicCounterType(GLenum type);
+bool IsOpaqueType(GLenum type);
+bool IsMatrixType(GLenum type);
+GLenum TransposeMatrixType(GLenum type);
+int VariableRegisterCount(GLenum type);
+int MatrixRegisterCount(GLenum type, bool isRowMajorMatrix);
+int MatrixComponentCount(GLenum type, bool isRowMajorMatrix);
+int VariableSortOrder(GLenum type);
+GLenum VariableBoolVectorType(GLenum type);
+std::string GetGLSLTypeString(GLenum type);
+
+int AllocateFirstFreeBits(unsigned int *bits, unsigned int allocationSize, unsigned int bitsSize);
+
+// Parse the base resource name and array indices. Returns the base name of the resource.
+// If the provided name doesn't index an array, the outSubscripts vector will be empty.
+// If the provided name indexes an array, the outSubscripts vector will contain indices with
+// outermost array indices in the back. If an array index is invalid, GL_INVALID_INDEX is added to
+// outSubscripts.
+std::string ParseResourceName(const std::string &name, std::vector<unsigned int> *outSubscripts);
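As a concrete illustration of the contract described above (a usage sketch only; the name "lights[2][0]" is hypothetical and the resulting values simply follow the comment's outermost-index-last rule):

    std::vector<unsigned int> subscripts;
    std::string base = gl::ParseResourceName("lights[2][0]", &subscripts);
    // base       == "lights"
    // subscripts == {0, 2}  -- innermost index first, outermost index at the back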
+
+bool IsBuiltInName(const char *name);
+ANGLE_INLINE bool IsBuiltInName(const std::string &name)
+{
+ return IsBuiltInName(name.c_str());
+}
+
+// Strips only the last array index from a resource name.
+std::string StripLastArrayIndex(const std::string &name);
+
+bool SamplerNameContainsNonZeroArrayElement(const std::string &name);
+
+// Find the range of index values in the provided indices pointer. Primitive restart indices are
+// only counted in the range if primitive restart is disabled.
+IndexRange ComputeIndexRange(DrawElementsType indexType,
+ const GLvoid *indices,
+ size_t count,
+ bool primitiveRestartEnabled);
+
+// Get the primitive restart index value for the given index type.
+GLuint GetPrimitiveRestartIndex(DrawElementsType indexType);
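The two declarations above work together: the index range of a draw call ignores the restart value when primitive restart is enabled. Below is a minimal standalone sketch of that idea, not ANGLE's implementation; it assumes 16-bit indices, whose restart value is 0xFFFF per GetPrimitiveRestartIndexFromType just below.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Compute the min/max vertex index of a 16-bit index buffer, skipping the
    // restart index when primitive restart is enabled.
    void ComputeRange16(const std::vector<uint16_t> &indices,
                        bool primitiveRestartEnabled,
                        uint16_t *minOut,
                        uint16_t *maxOut)
    {
        uint16_t lo = 0xFFFF;
        uint16_t hi = 0;
        for (uint16_t index : indices)
        {
            if (primitiveRestartEnabled && index == 0xFFFF)
            {
                continue;  // restart markers are not real vertex indices
            }
            lo = std::min(lo, index);
            hi = std::max(hi, index);
        }
        *minOut = lo;  // lo > hi signals that no real indices were found
        *maxOut = hi;
    }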
+
+// Get the primitive restart index value with the given C++ type.
+template <typename T>
+constexpr T GetPrimitiveRestartIndexFromType()
+{
+ return std::numeric_limits<T>::max();
+}
+
+static_assert(GetPrimitiveRestartIndexFromType<uint8_t>() == 0xFF,
+ "verify restart index for uint8_t values");
+static_assert(GetPrimitiveRestartIndexFromType<uint16_t>() == 0xFFFF,
+              "verify restart index for uint16_t values");
+static_assert(GetPrimitiveRestartIndexFromType<uint32_t>() == 0xFFFFFFFF,
+              "verify restart index for uint32_t values");
+
+bool IsTriangleMode(PrimitiveMode drawMode);
+bool IsPolygonMode(PrimitiveMode mode);
+
+namespace priv
+{
+extern const angle::PackedEnumMap<PrimitiveMode, bool> gLineModes;
+} // namespace priv
+
+ANGLE_INLINE bool IsLineMode(PrimitiveMode primitiveMode)
+{
+ return priv::gLineModes[primitiveMode];
+}
+
+bool IsIntegerFormat(GLenum unsizedFormat);
+
+// Returns the product of the sizes in the vector, or 1 if the vector is empty. Doesn't currently
+// perform overflow checks.
+unsigned int ArraySizeProduct(const std::vector<unsigned int> &arraySizes);
+// Returns the product of the sizes in the vector except for the outermost dimension, or 1 if the
+// vector is empty.
+unsigned int InnerArraySizeProduct(const std::vector<unsigned int> &arraySizes);
+// Returns the outermost array dimension, or 1 if the vector is empty.
+unsigned int OutermostArraySize(const std::vector<unsigned int> &arraySizes);
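For instance, given a hypothetical declaration such as "uniform float u[4][3];" (a 4-element outer array of 3-element inner arrays), with sizes populated from that declaration, the helpers above would report:

    ArraySizeProduct(sizes)      == 12   (4 * 3)
    InnerArraySizeProduct(sizes) == 3    (all dimensions except the outermost)
    OutermostArraySize(sizes)    == 4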
+
+// Return the array index at the end of name, and write the length of name before the final array
+// index into nameLengthWithoutArrayIndexOut. If name doesn't include an array index, return
+// GL_INVALID_INDEX and write the length of the original string.
+unsigned int ParseArrayIndex(const std::string &name, size_t *nameLengthWithoutArrayIndexOut);
+
+enum class SamplerFormat : uint8_t
+{
+ Float = 0,
+ Unsigned = 1,
+ Signed = 2,
+ Shadow = 3,
+
+ InvalidEnum = 4,
+ EnumCount = 4,
+};
+
+struct UniformTypeInfo final : angle::NonCopyable
+{
+ inline constexpr UniformTypeInfo(GLenum type,
+ GLenum componentType,
+ GLenum textureType,
+ GLenum transposedMatrixType,
+ GLenum boolVectorType,
+ SamplerFormat samplerFormat,
+ int rowCount,
+ int columnCount,
+ int componentCount,
+ size_t componentSize,
+ size_t internalSize,
+ size_t externalSize,
+ bool isSampler,
+ bool isMatrixType,
+ bool isImageType);
+
+ GLenum type;
+ GLenum componentType;
+ GLenum textureType;
+ GLenum transposedMatrixType;
+ GLenum boolVectorType;
+ SamplerFormat samplerFormat;
+ int rowCount;
+ int columnCount;
+ int componentCount;
+ size_t componentSize;
+ size_t internalSize;
+ size_t externalSize;
+ bool isSampler;
+ bool isMatrixType;
+ bool isImageType;
+};
+
+inline constexpr UniformTypeInfo::UniformTypeInfo(GLenum type,
+ GLenum componentType,
+ GLenum textureType,
+ GLenum transposedMatrixType,
+ GLenum boolVectorType,
+ SamplerFormat samplerFormat,
+ int rowCount,
+ int columnCount,
+ int componentCount,
+ size_t componentSize,
+ size_t internalSize,
+ size_t externalSize,
+ bool isSampler,
+ bool isMatrixType,
+ bool isImageType)
+ : type(type),
+ componentType(componentType),
+ textureType(textureType),
+ transposedMatrixType(transposedMatrixType),
+ boolVectorType(boolVectorType),
+ samplerFormat(samplerFormat),
+ rowCount(rowCount),
+ columnCount(columnCount),
+ componentCount(componentCount),
+ componentSize(componentSize),
+ internalSize(internalSize),
+ externalSize(externalSize),
+ isSampler(isSampler),
+ isMatrixType(isMatrixType),
+ isImageType(isImageType)
+{}
+
+const UniformTypeInfo &GetUniformTypeInfo(GLenum uniformType);
+
+const char *GetGenericErrorMessage(GLenum error);
+
+unsigned int ElementTypeSize(GLenum elementType);
+
+bool IsMipmapFiltered(GLenum minFilterMode);
+
+template <typename T>
+T GetClampedVertexCount(size_t vertexCount)
+{
+ static constexpr size_t kMax = static_cast<size_t>(std::numeric_limits<T>::max());
+ return static_cast<T>(vertexCount > kMax ? kMax : vertexCount);
+}
+
+enum class PipelineType
+{
+ GraphicsPipeline = 0,
+ ComputePipeline = 1,
+};
+
+PipelineType GetPipelineType(ShaderType shaderType);
+
+// For use with KHR_debug.
+const char *GetDebugMessageSourceString(GLenum source);
+const char *GetDebugMessageTypeString(GLenum type);
+const char *GetDebugMessageSeverityString(GLenum severity);
+
+// For use with EXT_texture_format_sRGB_override and EXT_texture_sRGB_decode
+// A texture may be forced to decode to a nonlinear colorspace, to a linear colorspace, or to the
+// default colorspace of its current format.
+//
+// Default corresponds to "the texture should use the imageview that corresponds to its format".
+// Linear corresponds to "the texture has sRGB decoding disabled by extension, and should use a
+// linear imageview even if it is in a nonlinear format".
+// NonLinear corresponds to "the texture has sRGB override enabled by extension, and should use a
+// nonlinear imageview even if it is in a linear format".
+enum class SrgbOverride
+{
+ Default = 0,
+ SRGB,
+ Linear
+};
+
+// For use with EXT_sRGB_write_control
+// A render target may be forced to convert to a linear colorspace, or may be allowed to do whatever
+// colorspace conversion is appropriate for its format. There is no option to force linear->sRGB;
+// the conversion can only go from sRGB->linear.
+enum class SrgbWriteControlMode
+{
+ Default = 0,
+ Linear = 1
+};
+
+// For use with EXT_YUV_target
+// A sampler of external YUV textures may either implicitly perform RGB conversion (regular
+// samplerExternalOES) or skip the conversion and sample raw YUV values (__samplerExternal2DY2Y).
+enum class YuvSamplingMode
+{
+ Default = 0,
+ Y2Y = 1
+};
+
+ShaderType GetShaderTypeFromBitfield(size_t singleShaderType);
+GLbitfield GetBitfieldFromShaderType(ShaderType shaderType);
+bool ShaderTypeSupportsTransformFeedback(ShaderType shaderType);
+// Given a set of shader stages, returns the last vertex processing stage. This is the stage that
+// interfaces with the fragment shader.
+ShaderType GetLastPreFragmentStage(ShaderBitSet shaderTypes);
+
+} // namespace gl
+
+namespace egl
+{
+static const EGLenum FirstCubeMapTextureTarget = EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR;
+static const EGLenum LastCubeMapTextureTarget = EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_KHR;
+bool IsCubeMapTextureTarget(EGLenum target);
+size_t CubeMapTextureTargetToLayerIndex(EGLenum target);
+EGLenum LayerIndexToCubeMapTextureTarget(size_t index);
+bool IsTextureTarget(EGLenum target);
+bool IsRenderbufferTarget(EGLenum target);
+bool IsExternalImageTarget(EGLenum target);
+
+const char *GetGenericErrorMessage(EGLint error);
+} // namespace egl
+
+namespace egl_gl
+{
+GLuint EGLClientBufferToGLObjectHandle(EGLClientBuffer buffer);
+}
+
+namespace gl_egl
+{
+EGLenum GLComponentTypeToEGLColorComponentType(GLenum glComponentType);
+EGLClientBuffer GLObjectHandleToEGLClientBuffer(GLuint handle);
+} // namespace gl_egl
+
+namespace angle
+{
+bool IsDrawEntryPoint(EntryPoint entryPoint);
+bool IsDispatchEntryPoint(EntryPoint entryPoint);
+bool IsClearEntryPoint(EntryPoint entryPoint);
+bool IsQueryEntryPoint(EntryPoint entryPoint);
+} // namespace angle
+
+#if !defined(ANGLE_ENABLE_WINDOWS_UWP)
+void writeFile(const char *path, const void *data, size_t size);
+#endif
+
+#if defined(ANGLE_PLATFORM_WINDOWS)
+void ScheduleYield();
+#endif
+
+// Get the underlying type. Useful for indexing into arrays with enum values by avoiding the clutter
+// of the extraneous static_cast<>() calls.
+// https://stackoverflow.com/a/8357462
+template <typename E>
+constexpr typename std::underlying_type<E>::type ToUnderlying(E e) noexcept
+{
+ return static_cast<typename std::underlying_type<E>::type>(e);
+}
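A small usage sketch, reusing the SamplerFormat enum declared earlier in this header, showing how ToUnderlying removes the cast clutter when an enum class is used as an array index (the counter array is purely illustrative):

    size_t usageCounts[ToUnderlying(gl::SamplerFormat::EnumCount)] = {};
    usageCounts[ToUnderlying(gl::SamplerFormat::Shadow)]++;  // no explicit static_cast needed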
+
+#endif // COMMON_UTILITIES_H_
diff --git a/gfx/angle/checkout/src/common/vector_utils.h b/gfx/angle/checkout/src/common/vector_utils.h
new file mode 100644
index 0000000000..88c7492e72
--- /dev/null
+++ b/gfx/angle/checkout/src/common/vector_utils.h
@@ -0,0 +1,523 @@
+//
+// Copyright 2016 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// vector_utils.h: Utility classes implementing various vector operations
+
+#ifndef COMMON_VECTOR_UTILS_H_
+#define COMMON_VECTOR_UTILS_H_
+
+#include <cmath>
+#include <cstddef>
+#include <ostream>
+#include <type_traits>
+
+namespace angle
+{
+
+template <size_t Dimension, typename Type>
+class Vector;
+
+using Vector2 = Vector<2, float>;
+using Vector3 = Vector<3, float>;
+using Vector4 = Vector<4, float>;
+
+using Vector2I = Vector<2, int>;
+using Vector3I = Vector<3, int>;
+using Vector4I = Vector<4, int>;
+
+using Vector2U = Vector<2, unsigned int>;
+using Vector3U = Vector<3, unsigned int>;
+using Vector4U = Vector<4, unsigned int>;
+
+template <size_t Dimension, typename Type>
+class VectorBase
+{
+ public:
+ using VectorN = Vector<Dimension, Type>;
+
+ // Constructors
+ VectorBase() = default;
+ explicit VectorBase(Type element);
+
+ template <typename Type2>
+ VectorBase(const VectorBase<Dimension, Type2> &other);
+
+ template <typename Arg1, typename Arg2, typename... Args>
+ VectorBase(const Arg1 &arg1, const Arg2 &arg2, const Args &...args);
+
+ // Access the vector backing storage directly
+ const Type *data() const { return mData; }
+ Type *data() { return mData; }
+ constexpr size_t size() const { return Dimension; }
+
+ // Load or store the pointer from / to raw data
+ static VectorN Load(const Type *source);
+ static void Store(const VectorN &source, Type *destination);
+
+ // Index the vector
+ Type &operator[](size_t i) { return mData[i]; }
+ const Type &operator[](size_t i) const { return mData[i]; }
+
+ // Basic arithmetic operations
+ VectorN operator+() const;
+ VectorN operator-() const;
+ VectorN operator+(const VectorN &other) const;
+ VectorN operator-(const VectorN &other) const;
+ VectorN operator*(const VectorN &other) const;
+ VectorN operator/(const VectorN &other) const;
+ VectorN operator*(Type other) const;
+ VectorN operator/(Type other) const;
+ friend VectorN operator*(Type a, const VectorN &b) { return b * a; }
+
+ // Compound arithmetic operations
+ VectorN &operator+=(const VectorN &other);
+ VectorN &operator-=(const VectorN &other);
+ VectorN &operator*=(const VectorN &other);
+ VectorN &operator/=(const VectorN &other);
+ VectorN &operator*=(Type other);
+ VectorN &operator/=(Type other);
+
+ // Comparison operators
+ bool operator==(const VectorBase<Dimension, Type> &other) const;
+ bool operator!=(const VectorBase<Dimension, Type> &other) const;
+
+ // Other arithmetic operations
+ Type length() const;
+ Type lengthSquared() const;
+ Type dot(const VectorBase<Dimension, Type> &other) const;
+ VectorN normalized() const;
+
+ protected:
+ template <size_t CurrentIndex, size_t OtherDimension, typename OtherType, typename... Args>
+ void initWithList(const Vector<OtherDimension, OtherType> &arg1, const Args &...args);
+
+    // Some old compilers consider this function an alternative to initWithList(Vector)
+    // even when the variant above is more precise. Use SFINAE on the return value to hide
+ // this variant for non-arithmetic types. The return value is still void.
+ template <size_t CurrentIndex, typename OtherType, typename... Args>
+ typename std::enable_if<std::is_arithmetic<OtherType>::value>::type initWithList(
+ OtherType arg1,
+ const Args &...args);
+
+ template <size_t CurrentIndex>
+ void initWithList() const;
+
+ template <size_t Dimension2, typename Type2>
+ friend class VectorBase;
+
+ Type mData[Dimension];
+};
+
+template <size_t Dimension, typename Type>
+std::ostream &operator<<(std::ostream &ostream, const VectorBase<Dimension, Type> &vector);
+
+template <typename Type>
+class Vector<2, Type> : public VectorBase<2, Type>
+{
+ public:
+ // Import the constructors defined in VectorBase
+ using VectorBase<2, Type>::VectorBase;
+
+ // Element shorthands
+ Type &x() { return this->mData[0]; }
+ Type &y() { return this->mData[1]; }
+
+ const Type &x() const { return this->mData[0]; }
+ const Type &y() const { return this->mData[1]; }
+};
+
+template <typename Type>
+std::ostream &operator<<(std::ostream &ostream, const Vector<2, Type> &vector);
+
+template <typename Type>
+class Vector<3, Type> : public VectorBase<3, Type>
+{
+ public:
+ // Import the constructors defined in VectorBase
+ using VectorBase<3, Type>::VectorBase;
+
+ // Additional operations
+ Vector<3, Type> cross(const Vector<3, Type> &other) const;
+
+ // Element shorthands
+ Type &x() { return this->mData[0]; }
+ Type &y() { return this->mData[1]; }
+ Type &z() { return this->mData[2]; }
+
+ const Type &x() const { return this->mData[0]; }
+ const Type &y() const { return this->mData[1]; }
+ const Type &z() const { return this->mData[2]; }
+};
+
+template <typename Type>
+std::ostream &operator<<(std::ostream &ostream, const Vector<3, Type> &vector);
+
+template <typename Type>
+class Vector<4, Type> : public VectorBase<4, Type>
+{
+ public:
+ // Import the constructors defined in VectorBase
+ using VectorBase<4, Type>::VectorBase;
+
+ // Element shorthands
+ Type &x() { return this->mData[0]; }
+ Type &y() { return this->mData[1]; }
+ Type &z() { return this->mData[2]; }
+ Type &w() { return this->mData[3]; }
+
+ const Type &x() const { return this->mData[0]; }
+ const Type &y() const { return this->mData[1]; }
+ const Type &z() const { return this->mData[2]; }
+ const Type &w() const { return this->mData[3]; }
+};
+
+template <typename Type>
+std::ostream &operator<<(std::ostream &ostream, const Vector<4, Type> &vector);
+
+// Implementation of constructors and misc operations
+
+template <size_t Dimension, typename Type>
+VectorBase<Dimension, Type>::VectorBase(Type element)
+{
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ mData[i] = element;
+ }
+}
+
+template <size_t Dimension, typename Type>
+template <typename Type2>
+VectorBase<Dimension, Type>::VectorBase(const VectorBase<Dimension, Type2> &other)
+{
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ mData[i] = static_cast<Type>(other.mData[i]);
+ }
+}
+
+// Ideally we would like to have only two constructors:
+// - a scalar constructor that takes Type as a parameter
+// - a compound constructor
+// However, if we define the compound constructor for the case of a single argument, then calling
+// Vector2(0.0) will be ambiguous. To solve this we take advantage of there being a single compound
+// constructor with a single argument, which is the copy constructor. We end up with three
+// constructors:
+// - the scalar constructor
+// - the copy constructor
+//  - the compound constructor for two or more arguments, hence the arg1 and arg2 here.
+template <size_t Dimension, typename Type>
+template <typename Arg1, typename Arg2, typename... Args>
+VectorBase<Dimension, Type>::VectorBase(const Arg1 &arg1, const Arg2 &arg2, const Args &...args)
+{
+ initWithList<0>(arg1, arg2, args...);
+}
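A usage sketch of the three constructor flavours just described (the values are illustrative only):

    angle::Vector2 a(0.0f);           // scalar constructor: both components become 0.0f
    angle::Vector2 b(a);              // copy constructor
    angle::Vector4 c(a, 2.0f, 3.0f);  // compound constructor: components (0, 0, 2, 3)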
+
+template <size_t Dimension, typename Type>
+template <size_t CurrentIndex, size_t OtherDimension, typename OtherType, typename... Args>
+void VectorBase<Dimension, Type>::initWithList(const Vector<OtherDimension, OtherType> &arg1,
+ const Args &...args)
+{
+ static_assert(CurrentIndex + OtherDimension <= Dimension,
+ "Too much data in the vector constructor.");
+ for (size_t i = 0; i < OtherDimension; ++i)
+ {
+ mData[CurrentIndex + i] = static_cast<Type>(arg1.mData[i]);
+ }
+ initWithList<CurrentIndex + OtherDimension>(args...);
+}
+
+template <size_t Dimension, typename Type>
+template <size_t CurrentIndex, typename OtherType, typename... Args>
+typename std::enable_if<std::is_arithmetic<OtherType>::value>::type
+VectorBase<Dimension, Type>::initWithList(OtherType arg1, const Args &...args)
+{
+ static_assert(CurrentIndex + 1 <= Dimension, "Too much data in the vector constructor.");
+ mData[CurrentIndex] = static_cast<Type>(arg1);
+ initWithList<CurrentIndex + 1>(args...);
+}
+
+template <size_t Dimension, typename Type>
+template <size_t CurrentIndex>
+void VectorBase<Dimension, Type>::initWithList() const
+{
+ static_assert(CurrentIndex == Dimension, "Not enough data in the vector constructor.");
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> VectorBase<Dimension, Type>::Load(const Type *source)
+{
+ Vector<Dimension, Type> result;
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ result.mData[i] = source[i];
+ }
+ return result;
+}
+
+template <size_t Dimension, typename Type>
+void VectorBase<Dimension, Type>::Store(const Vector<Dimension, Type> &source, Type *destination)
+{
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ destination[i] = source.mData[i];
+ }
+}
+
+// Implementation of basic arithmetic operations
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> VectorBase<Dimension, Type>::operator+() const
+{
+ Vector<Dimension, Type> result;
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ result.mData[i] = +mData[i];
+ }
+ return result;
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> VectorBase<Dimension, Type>::operator-() const
+{
+ Vector<Dimension, Type> result;
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ result.mData[i] = -mData[i];
+ }
+ return result;
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> VectorBase<Dimension, Type>::operator+(
+ const Vector<Dimension, Type> &other) const
+{
+ Vector<Dimension, Type> result;
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ result.mData[i] = mData[i] + other.mData[i];
+ }
+ return result;
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> VectorBase<Dimension, Type>::operator-(
+ const Vector<Dimension, Type> &other) const
+{
+ Vector<Dimension, Type> result;
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ result.mData[i] = mData[i] - other.mData[i];
+ }
+ return result;
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> VectorBase<Dimension, Type>::operator*(
+ const Vector<Dimension, Type> &other) const
+{
+ Vector<Dimension, Type> result;
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ result.mData[i] = mData[i] * other.mData[i];
+ }
+ return result;
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> VectorBase<Dimension, Type>::operator/(
+ const Vector<Dimension, Type> &other) const
+{
+ Vector<Dimension, Type> result;
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ result.mData[i] = mData[i] / other.mData[i];
+ }
+ return result;
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> VectorBase<Dimension, Type>::operator*(Type other) const
+{
+ Vector<Dimension, Type> result;
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ result.mData[i] = mData[i] * other;
+ }
+ return result;
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> VectorBase<Dimension, Type>::operator/(Type other) const
+{
+ Vector<Dimension, Type> result;
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ result.mData[i] = mData[i] / other;
+ }
+ return result;
+}
+
+// Implementation of compound arithmetic operations
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator+=(
+ const Vector<Dimension, Type> &other)
+{
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ mData[i] += other.mData[i];
+ }
+ return *static_cast<Vector<Dimension, Type> *>(this);
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator-=(
+ const Vector<Dimension, Type> &other)
+{
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ mData[i] -= other.mData[i];
+ }
+ return *static_cast<Vector<Dimension, Type> *>(this);
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator*=(
+ const Vector<Dimension, Type> &other)
+{
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ mData[i] *= other.mData[i];
+ }
+ return *static_cast<Vector<Dimension, Type> *>(this);
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator/=(
+ const Vector<Dimension, Type> &other)
+{
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ mData[i] /= other.mData[i];
+ }
+ return *static_cast<Vector<Dimension, Type> *>(this);
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator*=(Type other)
+{
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ mData[i] *= other;
+ }
+ return *static_cast<Vector<Dimension, Type> *>(this);
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator/=(Type other)
+{
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ mData[i] /= other;
+ }
+ return *static_cast<Vector<Dimension, Type> *>(this);
+}
+
+// Implementation of comparison operators
+template <size_t Dimension, typename Type>
+bool VectorBase<Dimension, Type>::operator==(const VectorBase<Dimension, Type> &other) const
+{
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ if (mData[i] != other.mData[i])
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <size_t Dimension, typename Type>
+bool VectorBase<Dimension, Type>::operator!=(const VectorBase<Dimension, Type> &other) const
+{
+ return !(*this == other);
+}
+
+// Implementation of other arithmetic operations
+template <size_t Dimension, typename Type>
+Type VectorBase<Dimension, Type>::length() const
+{
+ static_assert(std::is_floating_point<Type>::value,
+ "VectorN::length is only defined for floating point vectors");
+ return std::sqrt(lengthSquared());
+}
+
+template <size_t Dimension, typename Type>
+Type VectorBase<Dimension, Type>::lengthSquared() const
+{
+ return dot(*this);
+}
+
+template <size_t Dimension, typename Type>
+Type VectorBase<Dimension, Type>::dot(const VectorBase<Dimension, Type> &other) const
+{
+ Type sum = Type();
+ for (size_t i = 0; i < Dimension; ++i)
+ {
+ sum += mData[i] * other.mData[i];
+ }
+ return sum;
+}
+
+template <size_t Dimension, typename Type>
+std::ostream &operator<<(std::ostream &ostream, const VectorBase<Dimension, Type> &vector)
+{
+ ostream << "[ ";
+ for (size_t elementIdx = 0; elementIdx < Dimension; elementIdx++)
+ {
+ if (elementIdx > 0)
+ {
+ ostream << ", ";
+ }
+ ostream << vector.data()[elementIdx];
+ }
+ ostream << " ]";
+ return ostream;
+}
+
+template <size_t Dimension, typename Type>
+Vector<Dimension, Type> VectorBase<Dimension, Type>::normalized() const
+{
+ static_assert(std::is_floating_point<Type>::value,
+ "VectorN::normalized is only defined for floating point vectors");
+ return *this / length();
+}
+
+template <typename Type>
+std::ostream &operator<<(std::ostream &ostream, const Vector<2, Type> &vector)
+{
+ return ostream << static_cast<const VectorBase<2, Type> &>(vector);
+}
+
+template <typename Type>
+Vector<3, Type> Vector<3, Type>::cross(const Vector<3, Type> &other) const
+{
+ return Vector<3, Type>(y() * other.z() - z() * other.y(), z() * other.x() - x() * other.z(),
+ x() * other.y() - y() * other.x());
+}
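As a quick sanity check of the formula above, crossing the X and Y unit vectors yields the Z unit vector:

    angle::Vector3 xAxis(1.0f, 0.0f, 0.0f);
    angle::Vector3 yAxis(0.0f, 1.0f, 0.0f);
    angle::Vector3 zAxis = xAxis.cross(yAxis);  // (0, 0, 1)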
+
+template <typename Type>
+std::ostream &operator<<(std::ostream &ostream, const Vector<3, Type> &vector)
+{
+ return ostream << static_cast<const VectorBase<3, Type> &>(vector);
+}
+
+template <typename Type>
+std::ostream &operator<<(std::ostream &ostream, const Vector<4, Type> &vector)
+{
+ return ostream << static_cast<const VectorBase<4, Type> &>(vector);
+}
+
+} // namespace angle
+
+#endif // COMMON_VECTOR_UTILS_H_
diff --git a/gfx/angle/checkout/src/common/vulkan/libvulkan_loader.cpp b/gfx/angle/checkout/src/common/vulkan/libvulkan_loader.cpp
new file mode 100644
index 0000000000..dafe092732
--- /dev/null
+++ b/gfx/angle/checkout/src/common/vulkan/libvulkan_loader.cpp
@@ -0,0 +1,57 @@
+//
+// Copyright 2021 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// libvulkan_loader.cpp:
+// Helper functions for loading the Vulkan libraries.
+//
+
+#include "common/vulkan/libvulkan_loader.h"
+
+#include "common/system_utils.h"
+
+namespace angle
+{
+namespace vk
+{
+void *OpenLibVulkan()
+{
+ constexpr const char *kLibVulkanNames[] = {
+#if defined(ANGLE_PLATFORM_WINDOWS)
+ "vulkan-1.dll",
+#elif defined(ANGLE_PLATFORM_APPLE)
+ "libvulkan.dylib",
+ "libvulkan.1.dylib",
+ "libMoltenVK.dylib"
+#else
+ "libvulkan.so",
+ "libvulkan.so.1",
+#endif
+ };
+
+ constexpr SearchType kSearchTypes[] = {
+// On Android, Fuchsia and GGP we use the system libvulkan.
+#if defined(ANGLE_USE_CUSTOM_LIBVULKAN)
+ SearchType::ModuleDir,
+#else
+ SearchType::SystemDir,
+#endif // defined(ANGLE_USE_CUSTOM_LIBVULKAN)
+ };
+
+ for (angle::SearchType searchType : kSearchTypes)
+ {
+ for (const char *libraryName : kLibVulkanNames)
+ {
+ void *library = OpenSystemLibraryWithExtension(libraryName, searchType);
+ if (library)
+ {
+ return library;
+ }
+ }
+ }
+
+ return nullptr;
+}
+} // namespace vk
+} // namespace angle
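A hypothetical call site for the helper above; how symbols are subsequently resolved from the returned handle (dlsym, volk, or similar) is loader specific and intentionally not shown:

    void *libVulkan = angle::vk::OpenLibVulkan();
    if (libVulkan == nullptr)
    {
        // No usable Vulkan library was found for any of the candidate names/search paths.
    }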
diff --git a/gfx/angle/checkout/src/common/vulkan/libvulkan_loader.h b/gfx/angle/checkout/src/common/vulkan/libvulkan_loader.h
new file mode 100644
index 0000000000..1a73c87b68
--- /dev/null
+++ b/gfx/angle/checkout/src/common/vulkan/libvulkan_loader.h
@@ -0,0 +1,23 @@
+//
+// Copyright 2021 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// libvulkan_loader.h:
+// Helper functions for loading the Vulkan libraries.
+//
+
+#include <memory>
+
+#ifndef LIBANGLE_COMMON_VULKAN_LIBVULKAN_LOADER_H_
+# define LIBANGLE_COMMON_VULKAN_LIBVULKAN_LOADER_H_
+
+namespace angle
+{
+namespace vk
+{
+void *OpenLibVulkan();
+}
+} // namespace angle
+
+#endif // LIBANGLE_COMMON_VULKAN_LIBVULKAN_LOADER_H_
diff --git a/gfx/angle/checkout/src/common/vulkan/vk_google_filtering_precision.h b/gfx/angle/checkout/src/common/vulkan/vk_google_filtering_precision.h
new file mode 100644
index 0000000000..934dc793c6
--- /dev/null
+++ b/gfx/angle/checkout/src/common/vulkan/vk_google_filtering_precision.h
@@ -0,0 +1,57 @@
+// Copyright 2020 The SwiftShader Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef CUSTOM_VK_GOOGLE_SAMPLER_FILTERING_PRECISION_H_
+#define CUSTOM_VK_GOOGLE_SAMPLER_FILTERING_PRECISION_H_
+
+#include "vk_headers.h"
+
+// THIS FILE SHOULD BE DELETED IF VK_GOOGLE_sampler_filtering_precision IS EVER ADDED TO THE VULKAN
+// HEADERS
+#ifdef VK_GOOGLE_sampler_filtering_precision
+# error \
+ "VK_GOOGLE_sampler_filtering_precision is already defined in the Vulkan headers, you can delete this file"
+#endif
+
+static constexpr VkStructureType VK_STRUCTURE_TYPE_SAMPLER_FILTERING_PRECISION_GOOGLE =
+ static_cast<VkStructureType>(1000264000);
+
+#define VK_GOOGLE_sampler_filtering_precision 1
+#define VK_GOOGLE_SAMPLER_FILTERING_PRECISION_SPEC_VERSION 1
+#define VK_GOOGLE_SAMPLER_FILTERING_PRECISION_EXTENSION_NAME "VK_GOOGLE_sampler_filtering_precision"
+
+const int TEXTURE_FILTERING_HINT_CHROMIUM = 0x8AF0;
+
+typedef enum VkSamplerFilteringPrecisionModeGOOGLE
+{
+ VK_SAMPLER_FILTERING_PRECISION_MODE_LOW_GOOGLE = 0,
+ VK_SAMPLER_FILTERING_PRECISION_MODE_HIGH_GOOGLE = 1,
+ VK_SAMPLER_FILTERING_PRECISION_MODE_BEGIN_RANGE_GOOGLE =
+ VK_SAMPLER_FILTERING_PRECISION_MODE_LOW_GOOGLE,
+ VK_SAMPLER_FILTERING_PRECISION_MODE_END_RANGE_GOOGLE =
+ VK_SAMPLER_FILTERING_PRECISION_MODE_HIGH_GOOGLE,
+ VK_SAMPLER_FILTERING_PRECISION_MODE_RANGE_SIZE_GOOGLE =
+ (VK_SAMPLER_FILTERING_PRECISION_MODE_HIGH_GOOGLE -
+ VK_SAMPLER_FILTERING_PRECISION_MODE_LOW_GOOGLE + 1),
+ VK_SAMPLER_FILTERING_PRECISION_MODE_MAX_ENUM_GOOGLE = 0x7FFFFFFF
+} VkSamplerFilteringPrecisionModeGOOGLE;
+
+typedef struct VkSamplerFilteringPrecisionGOOGLE
+{
+ VkStructureType sType;
+ const void *pNext;
+ VkSamplerFilteringPrecisionModeGOOGLE samplerFilteringPrecisionMode;
+} VkSamplerFilteringPrecisionGOOGLE;
+
+#endif // CUSTOM_VK_GOOGLE_SAMPLER_FILTERING_PRECISION_H_
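A hedged usage sketch: like other Vulkan extension structs, VkSamplerFilteringPrecisionGOOGLE would be chained into VkSamplerCreateInfo::pNext at sampler creation time. This follows the standard pNext pattern; since the extension is not part of the official headers, treat the exact consumption point as an assumption.

    // Chain the filtering-precision hint into a sampler create info (sketch only).
    VkSamplerFilteringPrecisionGOOGLE filteringPrecision = {};
    filteringPrecision.sType = VK_STRUCTURE_TYPE_SAMPLER_FILTERING_PRECISION_GOOGLE;
    filteringPrecision.samplerFilteringPrecisionMode = VK_SAMPLER_FILTERING_PRECISION_MODE_HIGH_GOOGLE;

    VkSamplerCreateInfo samplerInfo = {};
    samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
    samplerInfo.pNext = &filteringPrecision;
    // ... fill in the remaining sampler state, then call vkCreateSampler as usual.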
diff --git a/gfx/angle/checkout/src/common/vulkan/vk_headers.h b/gfx/angle/checkout/src/common/vulkan/vk_headers.h
new file mode 100644
index 0000000000..a16938a872
--- /dev/null
+++ b/gfx/angle/checkout/src/common/vulkan/vk_headers.h
@@ -0,0 +1,163 @@
+//
+// Copyright 2016 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// vk_headers:
+// This file should be included to ensure the Vulkan headers are included.
+//
+
+#ifndef LIBANGLE_RENDERER_VULKAN_VK_HEADERS_H_
+#define LIBANGLE_RENDERER_VULKAN_VK_HEADERS_H_
+
+#if ANGLE_SHARED_LIBVULKAN
+# include "third_party/volk/volk.h"
+#else
+# include <vulkan/vulkan.h>
+#endif
+
+// For the unreleased VK_GOOGLEX_multisampled_render_to_single_sampled
+#if !defined(VK_GOOGLEX_multisampled_render_to_single_sampled)
+# define VK_GOOGLEX_multisampled_render_to_single_sampled 1
+# define VK_GOOGLEX_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_SPEC_VERSION 1
+# define VK_GOOGLEX_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_EXTENSION_NAME \
+ "VK_GOOGLEX_multisampled_render_to_single_sampled"
+
+# define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_FEATURES_GOOGLEX \
+ ((VkStructureType)(1000376000))
+# define VK_STRUCTURE_TYPE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_INFO_GOOGLEX \
+ ((VkStructureType)(1000376001))
+
+typedef struct VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesGOOGLEX
+{
+ VkStructureType sType;
+ const void *pNext;
+ VkBool32 multisampledRenderToSingleSampled;
+} VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesGOOGLEX;
+
+typedef struct VkMultisampledRenderToSingleSampledInfoGOOGLEX
+{
+ VkStructureType sType;
+ const void *pNext;
+ VkBool32 multisampledRenderToSingleSampledEnable;
+ VkSampleCountFlagBits rasterizationSamples;
+ VkResolveModeFlagBits depthResolveMode;
+ VkResolveModeFlagBits stencilResolveMode;
+} VkMultisampledRenderToSingleSampledInfoGOOGLEX;
+#endif /* VK_GOOGLEX_multisampled_render_to_single_sampled */
+
+#if !defined(ANGLE_SHARED_LIBVULKAN)
+
+namespace rx
+{
+// VK_EXT_debug_utils
+extern PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;
+extern PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT;
+extern PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;
+extern PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;
+extern PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT;
+extern PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT;
+
+// VK_EXT_debug_report
+extern PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT;
+extern PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT;
+
+// VK_KHR_get_physical_device_properties2
+extern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR;
+extern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;
+extern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
+
+// VK_KHR_external_semaphore_fd
+extern PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;
+
+// VK_EXT_external_memory_host
+extern PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;
+
+// VK_EXT_host_query_reset
+extern PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;
+
+// VK_EXT_transform_feedback
+extern PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;
+extern PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;
+extern PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;
+extern PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;
+extern PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;
+extern PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;
+
+// VK_KHR_get_memory_requirements2
+extern PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
+extern PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
+
+// VK_KHR_bind_memory2
+extern PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
+extern PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
+
+// VK_KHR_external_fence_capabilities
+extern PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR
+ vkGetPhysicalDeviceExternalFencePropertiesKHR;
+
+// VK_KHR_external_fence_fd
+extern PFN_vkGetFenceFdKHR vkGetFenceFdKHR;
+extern PFN_vkImportFenceFdKHR vkImportFenceFdKHR;
+
+// VK_KHR_external_semaphore_capabilities
+extern PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR
+ vkGetPhysicalDeviceExternalSemaphorePropertiesKHR;
+
+// VK_KHR_sampler_ycbcr_conversion
+extern PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;
+extern PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;
+
+// VK_KHR_create_renderpass2
+extern PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;
+
+# if defined(ANGLE_PLATFORM_FUCHSIA)
+// VK_FUCHSIA_imagepipe_surface
+extern PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA;
+# endif
+
+# if defined(ANGLE_PLATFORM_ANDROID)
+extern PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;
+extern PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;
+# endif
+
+# if defined(ANGLE_PLATFORM_GGP)
+extern PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP;
+# endif // defined(ANGLE_PLATFORM_GGP)
+
+// VK_KHR_shared_presentable_image
+extern PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;
+
+// VK_EXT_extended_dynamic_state
+extern PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;
+extern PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;
+extern PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;
+extern PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;
+extern PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;
+extern PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;
+extern PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;
+extern PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;
+extern PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;
+extern PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;
+extern PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;
+extern PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;
+
+// VK_EXT_extended_dynamic_state2
+extern PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;
+extern PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;
+extern PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;
+extern PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;
+extern PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;
+
+// VK_KHR_fragment_shading_rate
+extern PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR;
+extern PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;
+
+// VK_GOOGLE_display_timing
+extern PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;
+
+} // namespace rx
+
+#endif // ANGLE_SHARED_LIBVULKAN
+
+#endif // LIBANGLE_RENDERER_VULKAN_VK_HEADERS_H_
diff --git a/gfx/angle/checkout/src/common/vulkan/vulkan_icd.cpp b/gfx/angle/checkout/src/common/vulkan/vulkan_icd.cpp
new file mode 100644
index 0000000000..22502c0457
--- /dev/null
+++ b/gfx/angle/checkout/src/common/vulkan/vulkan_icd.cpp
@@ -0,0 +1,349 @@
+//
+// Copyright 2020 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+// vulkan_icd.cpp: Helper for creating Vulkan instances & selecting a physical device.
+
+#include "common/vulkan/vulkan_icd.h"
+
+#include <functional>
+#include <vector>
+
+#include "common/Optional.h"
+#include "common/bitset_utils.h"
+#include "common/debug.h"
+#include "common/system_utils.h"
+
+#include "common/vulkan/vk_google_filtering_precision.h"
+
+namespace
+{
+void ResetEnvironmentVar(const char *variableName, const Optional<std::string> &value)
+{
+ if (!value.valid())
+ {
+ return;
+ }
+
+ if (value.value().empty())
+ {
+ angle::UnsetEnvironmentVar(variableName);
+ }
+ else
+ {
+ angle::SetEnvironmentVar(variableName, value.value().c_str());
+ }
+}
+} // namespace
+
+namespace angle
+{
+
+namespace vk
+{
+
+namespace
+{
+
+[[maybe_unused]] const std::string WrapICDEnvironment(const char *icdEnvironment)
+{
+ // The libraries are bundled into the module directory
+ std::string ret = ConcatenatePath(angle::GetModuleDirectory(), icdEnvironment);
+ return ret;
+}
+
+[[maybe_unused]] constexpr char kLoaderLayersPathEnv[] = "VK_LAYER_PATH";
+[[maybe_unused]] constexpr char kLayerEnablesEnv[] = "VK_LAYER_ENABLES";
+
+constexpr char kLoaderICDFilenamesEnv[] = "VK_ICD_FILENAMES";
+constexpr char kANGLEPreferredDeviceEnv[] = "ANGLE_PREFERRED_DEVICE";
+constexpr char kValidationLayersCustomSTypeListEnv[] = "VK_LAYER_CUSTOM_STYPE_LIST";
+constexpr char kNoDeviceSelect[] = "NODEVICE_SELECT";
+
+constexpr uint32_t kMockVendorID = 0xba5eba11;
+constexpr uint32_t kMockDeviceID = 0xf005ba11;
+constexpr char kMockDeviceName[] = "Vulkan Mock Device";
+
+constexpr uint32_t kGoogleVendorID = 0x1AE0;
+constexpr uint32_t kSwiftShaderDeviceID = 0xC0DE;
+constexpr char kSwiftShaderDeviceName[] = "SwiftShader Device";
+
+using ICDFilterFunc = std::function<bool(const VkPhysicalDeviceProperties &)>;
+
+ICDFilterFunc GetFilterForICD(vk::ICD preferredICD)
+{
+ switch (preferredICD)
+ {
+ case vk::ICD::Mock:
+ return [](const VkPhysicalDeviceProperties &deviceProperties) {
+ return ((deviceProperties.vendorID == kMockVendorID) &&
+ (deviceProperties.deviceID == kMockDeviceID) &&
+ (strcmp(deviceProperties.deviceName, kMockDeviceName) == 0));
+ };
+ case vk::ICD::SwiftShader:
+ return [](const VkPhysicalDeviceProperties &deviceProperties) {
+ return ((deviceProperties.vendorID == kGoogleVendorID) &&
+ (deviceProperties.deviceID == kSwiftShaderDeviceID) &&
+ (strncmp(deviceProperties.deviceName, kSwiftShaderDeviceName,
+ strlen(kSwiftShaderDeviceName)) == 0));
+ };
+ default:
+ const std::string anglePreferredDevice =
+ angle::GetEnvironmentVar(kANGLEPreferredDeviceEnv);
+ return [anglePreferredDevice](const VkPhysicalDeviceProperties &deviceProperties) {
+ return (anglePreferredDevice == deviceProperties.deviceName);
+ };
+ }
+}
+
+} // namespace
+
+// If we're loading the validation layers, we could be running from any random directory.
+// Change to the executable directory so we can find the layers, then change back to the
+// previous directory so that we don't disrupt the application.
+ScopedVkLoaderEnvironment::ScopedVkLoaderEnvironment(bool enableValidationLayers, vk::ICD icd)
+ : mEnableValidationLayers(enableValidationLayers),
+ mICD(icd),
+ mChangedCWD(false),
+ mChangedICDEnv(false),
+ mChangedNoDeviceSelect(false)
+{
+// Changing CWD and setting environment variables makes no sense on Android,
+// since this code is part of a Java application there.
+// The Android Vulkan loader doesn't need this either.
+#if !defined(ANGLE_PLATFORM_ANDROID) && !defined(ANGLE_PLATFORM_GGP)
+ if (icd == vk::ICD::Mock)
+ {
+ if (!setICDEnvironment(WrapICDEnvironment(ANGLE_VK_MOCK_ICD_JSON).c_str()))
+ {
+ ERR() << "Error setting environment for Mock/Null Driver.";
+ }
+ }
+# if defined(ANGLE_VK_SWIFTSHADER_ICD_JSON)
+ else if (icd == vk::ICD::SwiftShader)
+ {
+ if (!setICDEnvironment(WrapICDEnvironment(ANGLE_VK_SWIFTSHADER_ICD_JSON).c_str()))
+ {
+ ERR() << "Error setting environment for SwiftShader.";
+ }
+ }
+# endif // defined(ANGLE_VK_SWIFTSHADER_ICD_JSON)
+
+# if !defined(ANGLE_PLATFORM_MACOS)
+ if (mEnableValidationLayers || icd != vk::ICD::Default)
+ {
+ const auto &cwd = angle::GetCWD();
+ if (!cwd.valid())
+ {
+ ERR() << "Error getting CWD for Vulkan layers init.";
+ mEnableValidationLayers = false;
+ mICD = vk::ICD::Default;
+ }
+ else
+ {
+ mPreviousCWD = cwd.value();
+ std::string moduleDir = angle::GetModuleDirectory();
+ mChangedCWD = angle::SetCWD(moduleDir.c_str());
+ if (!mChangedCWD)
+ {
+ ERR() << "Error setting CWD for Vulkan layers init.";
+ mEnableValidationLayers = false;
+ mICD = vk::ICD::Default;
+ }
+ }
+ }
+# endif // defined(ANGLE_PLATFORM_MACOS)
+
+ // Override environment variable to use the ANGLE layers.
+ if (mEnableValidationLayers)
+ {
+# if defined(ANGLE_VK_LAYERS_DIR)
+ if (!angle::PrependPathToEnvironmentVar(kLoaderLayersPathEnv, ANGLE_VK_LAYERS_DIR))
+ {
+ ERR() << "Error setting environment for Vulkan layers init.";
+ mEnableValidationLayers = false;
+ }
+# endif // defined(ANGLE_VK_LAYERS_DIR)
+
+ if (!angle::PrependPathToEnvironmentVar(
+ kLayerEnablesEnv, "VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION"))
+ {
+ ERR() << "Error setting synchronization validation environment for Vulkan validation "
+ "layers init.";
+ }
+
+ if (!setCustomExtensionsEnvironment())
+ {
+ ERR() << "Error setting custom list for custom extensions for Vulkan layers init.";
+ mEnableValidationLayers = false;
+ }
+ }
+#endif // !defined(ANGLE_PLATFORM_ANDROID)
+
+ if (IsMSan() || IsASan())
+ {
+        // The device select layer causes memory sanitizer false positives, so disable
+        // it for MSan builds.
+ mPreviousNoDeviceSelectEnv = angle::GetEnvironmentVar(kNoDeviceSelect);
+ angle::SetEnvironmentVar(kNoDeviceSelect, "1");
+ mChangedNoDeviceSelect = true;
+ }
+}
+
+ScopedVkLoaderEnvironment::~ScopedVkLoaderEnvironment()
+{
+ if (mChangedCWD)
+ {
+#if !defined(ANGLE_PLATFORM_ANDROID)
+ ASSERT(mPreviousCWD.valid());
+ angle::SetCWD(mPreviousCWD.value().c_str());
+#endif // !defined(ANGLE_PLATFORM_ANDROID)
+ }
+ if (mChangedICDEnv)
+ {
+ ResetEnvironmentVar(kLoaderICDFilenamesEnv, mPreviousICDEnv);
+ }
+
+ ResetEnvironmentVar(kValidationLayersCustomSTypeListEnv, mPreviousCustomExtensionsEnv);
+
+ if (mChangedNoDeviceSelect)
+ {
+ ResetEnvironmentVar(kNoDeviceSelect, mPreviousNoDeviceSelectEnv);
+ }
+}
+
+bool ScopedVkLoaderEnvironment::setICDEnvironment(const char *icd)
+{
+ // Override environment variable to use built Mock ICD
+ // ANGLE_VK_ICD_JSON gets set to the built mock ICD in BUILD.gn
+ mPreviousICDEnv = angle::GetEnvironmentVar(kLoaderICDFilenamesEnv);
+ mChangedICDEnv = angle::SetEnvironmentVar(kLoaderICDFilenamesEnv, icd);
+
+ if (!mChangedICDEnv)
+ {
+ mICD = vk::ICD::Default;
+ }
+ return mChangedICDEnv;
+}
+
+bool ScopedVkLoaderEnvironment::setCustomExtensionsEnvironment()
+{
+ struct CustomExtension
+ {
+ VkStructureType type;
+ size_t size;
+ };
+
+ CustomExtension customExtensions[] = {
+
+ {VK_STRUCTURE_TYPE_SAMPLER_FILTERING_PRECISION_GOOGLE,
+ sizeof(VkSamplerFilteringPrecisionGOOGLE)},
+
+ };
+
+ mPreviousCustomExtensionsEnv = angle::GetEnvironmentVar(kValidationLayersCustomSTypeListEnv);
+
+ std::stringstream strstr;
+ for (CustomExtension &extension : customExtensions)
+ {
+ if (strstr.tellp() != std::streampos(0))
+ {
+ strstr << angle::GetPathSeparatorForEnvironmentVar();
+ }
+
+ strstr << extension.type << angle::GetPathSeparatorForEnvironmentVar() << extension.size;
+ }
+
+ return angle::PrependPathToEnvironmentVar(kValidationLayersCustomSTypeListEnv,
+ strstr.str().c_str());
+}
+
+void ChoosePhysicalDevice(PFN_vkGetPhysicalDeviceProperties pGetPhysicalDeviceProperties,
+ const std::vector<VkPhysicalDevice> &physicalDevices,
+ vk::ICD preferredICD,
+ uint32_t preferredVendorID,
+ uint32_t preferredDeviceID,
+ VkPhysicalDevice *physicalDeviceOut,
+ VkPhysicalDeviceProperties *physicalDevicePropertiesOut)
+{
+ ASSERT(!physicalDevices.empty());
+
+ ICDFilterFunc filter = GetFilterForICD(preferredICD);
+
+ const bool shouldChooseByID = (preferredVendorID != 0 || preferredDeviceID != 0);
+
+ for (const VkPhysicalDevice &physicalDevice : physicalDevices)
+ {
+ pGetPhysicalDeviceProperties(physicalDevice, physicalDevicePropertiesOut);
+ if (filter(*physicalDevicePropertiesOut))
+ {
+ *physicalDeviceOut = physicalDevice;
+ return;
+ }
+
+ if (shouldChooseByID)
+ {
+ // NOTE: If the system has multiple GPUs with the same vendor and
+ // device IDs, this will arbitrarily select one of them.
+ bool matchVendorID = true;
+ bool matchDeviceID = true;
+
+ if (preferredVendorID != 0 &&
+ preferredVendorID != physicalDevicePropertiesOut->vendorID)
+ {
+ matchVendorID = false;
+ }
+
+ if (preferredDeviceID != 0 &&
+ preferredDeviceID != physicalDevicePropertiesOut->deviceID)
+ {
+ matchDeviceID = false;
+ }
+
+ if (matchVendorID && matchDeviceID)
+ {
+ *physicalDeviceOut = physicalDevice;
+ return;
+ }
+ }
+ }
+
+ Optional<VkPhysicalDevice> integratedDevice;
+ VkPhysicalDeviceProperties integratedDeviceProperties;
+ for (const VkPhysicalDevice &physicalDevice : physicalDevices)
+ {
+ pGetPhysicalDeviceProperties(physicalDevice, physicalDevicePropertiesOut);
+        // If a discrete GPU exists, use it by default.
+ if (physicalDevicePropertiesOut->deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU)
+ {
+ *physicalDeviceOut = physicalDevice;
+ return;
+ }
+ if (physicalDevicePropertiesOut->deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU &&
+ !integratedDevice.valid())
+ {
+ integratedDevice = physicalDevice;
+ integratedDeviceProperties = *physicalDevicePropertiesOut;
+ continue;
+ }
+ }
+
+    // If only an integrated GPU exists, use it by default.
+ if (integratedDevice.valid())
+ {
+ *physicalDeviceOut = integratedDevice.value();
+ *physicalDevicePropertiesOut = integratedDeviceProperties;
+ return;
+ }
+
+ WARN() << "Preferred device ICD not found. Using default physicalDevice instead.";
+ // Fallback to the first device.
+ *physicalDeviceOut = physicalDevices[0];
+ pGetPhysicalDeviceProperties(*physicalDeviceOut, physicalDevicePropertiesOut);
+}
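The function above first looks for a device matching the ICD filter or the preferred vendor/device IDs, then falls back to a discrete GPU, then an integrated GPU, and finally the first enumerated device. A usage sketch follows; the VkInstance named instance is assumed to exist, the entry points are assumed to be resolved, and the device list must be non-empty:

    uint32_t deviceCount = 0;
    vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr);
    std::vector<VkPhysicalDevice> devices(deviceCount);
    vkEnumeratePhysicalDevices(instance, &deviceCount, devices.data());

    VkPhysicalDevice chosenDevice               = VK_NULL_HANDLE;
    VkPhysicalDeviceProperties chosenProperties = {};
    angle::vk::ChoosePhysicalDevice(vkGetPhysicalDeviceProperties, devices, angle::vk::ICD::Default,
                                    0 /* any vendor */, 0 /* any device */, &chosenDevice,
                                    &chosenProperties);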
+
+} // namespace vk
+
+} // namespace angle
diff --git a/gfx/angle/checkout/src/common/vulkan/vulkan_icd.h b/gfx/angle/checkout/src/common/vulkan/vulkan_icd.h
new file mode 100644
index 0000000000..b2921f8203
--- /dev/null
+++ b/gfx/angle/checkout/src/common/vulkan/vulkan_icd.h
@@ -0,0 +1,72 @@
+//
+// Copyright 2020 The ANGLE Project Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// vulkan_icd.h: Helper for creating Vulkan instances & selecting a physical device.
+
+#ifndef COMMON_VULKAN_VULKAN_ICD_H_
+#define COMMON_VULKAN_VULKAN_ICD_H_
+
+#include <string>
+
+#include "common/Optional.h"
+#include "common/angleutils.h"
+#include "common/vulkan/vk_headers.h"
+
+namespace angle
+{
+
+namespace vk
+{
+
+enum class ICD
+{
+ Default,
+ Mock,
+ SwiftShader,
+};
+
+struct SimpleDisplayWindow
+{
+ uint16_t width;
+ uint16_t height;
+};
+
+class [[nodiscard]] ScopedVkLoaderEnvironment : angle::NonCopyable
+{
+ public:
+ ScopedVkLoaderEnvironment(bool enableValidationLayers, vk::ICD icd);
+ ~ScopedVkLoaderEnvironment();
+
+ bool canEnableValidationLayers() const { return mEnableValidationLayers; }
+ vk::ICD getEnabledICD() const { return mICD; }
+
+ private:
+ bool setICDEnvironment(const char *icd);
+ bool setCustomExtensionsEnvironment();
+
+ bool mEnableValidationLayers;
+ vk::ICD mICD;
+ bool mChangedCWD;
+ Optional<std::string> mPreviousCWD;
+ bool mChangedICDEnv;
+ Optional<std::string> mPreviousICDEnv;
+ Optional<std::string> mPreviousCustomExtensionsEnv;
+ bool mChangedNoDeviceSelect;
+ Optional<std::string> mPreviousNoDeviceSelectEnv;
+};
+
+void ChoosePhysicalDevice(PFN_vkGetPhysicalDeviceProperties pGetPhysicalDeviceProperties,
+ const std::vector<VkPhysicalDevice> &physicalDevices,
+ vk::ICD preferredICD,
+ uint32_t preferredVendorID,
+ uint32_t preferredDeviceID,
+ VkPhysicalDevice *physicalDeviceOut,
+ VkPhysicalDeviceProperties *physicalDevicePropertiesOut);
+
+} // namespace vk
+
+} // namespace angle
+
+#endif // COMMON_VULKAN_VULKAN_ICD_H_