| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 14:29:10 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 14:29:10 +0000 |
| commit | 2aa4a82499d4becd2284cdb482213d541b8804dd | |
| tree | b80bf8bf13c3766139fbacc530efd0dd9d54394c | /gfx/angle/checkout/src/common |
| parent | Initial commit. | |
Adding upstream version 86.0.1. (upstream/86.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'gfx/angle/checkout/src/common')
63 files changed, 17916 insertions, 0 deletions
diff --git a/gfx/angle/checkout/src/common/Color.h b/gfx/angle/checkout/src/common/Color.h new file mode 100644 index 0000000000..884da078e6 --- /dev/null +++ b/gfx/angle/checkout/src/common/Color.h @@ -0,0 +1,104 @@ +// +// Copyright (c) 2016 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// Color.h : Defines the Color type used throughout the ANGLE libraries + +#ifndef COMMON_COLOR_H_ +#define COMMON_COLOR_H_ + +#include <cstdint> + +namespace angle +{ + +template <typename T> +struct Color +{ + Color(); + constexpr Color(T r, T g, T b, T a); + + const T *data() const { return &red; } + T *ptr() { return &red; } + + static Color fromData(const T *data) { return Color(data[0], data[1], data[2], data[3]); } + void writeData(T *data) const + { + data[0] = red; + data[1] = green; + data[2] = blue; + data[3] = alpha; + } + + T red; + T green; + T blue; + T alpha; +}; + +template <typename T> +bool operator==(const Color<T> &a, const Color<T> &b); + +template <typename T> +bool operator!=(const Color<T> &a, const Color<T> &b); + +typedef Color<float> ColorF; +typedef Color<int> ColorI; +typedef Color<unsigned int> ColorUI; + +struct ColorGeneric +{ + inline ColorGeneric(); + inline ColorGeneric(const ColorF &color); + inline ColorGeneric(const ColorI &color); + inline ColorGeneric(const ColorUI &color); + + enum class Type : uint8_t + { + Float = 0, + Int = 1, + UInt = 2 + }; + + union + { + ColorF colorF; + ColorI colorI; + ColorUI colorUI; + }; + + Type type; +}; + +inline bool operator==(const ColorGeneric &a, const ColorGeneric &b); + +inline bool operator!=(const ColorGeneric &a, const ColorGeneric &b); + +struct DepthStencil +{ + DepthStencil() : depth(0), stencil(0) {} + + // Double is needed to represent the 32-bit integer range of GL_DEPTH_COMPONENT32. + double depth; + uint32_t stencil; +}; +} // namespace angle + +// TODO: Move this fully into the angle namespace +namespace gl +{ + +template <typename T> +using Color = angle::Color<T>; +using ColorF = angle::ColorF; +using ColorI = angle::ColorI; +using ColorUI = angle::ColorUI; +using ColorGeneric = angle::ColorGeneric; + +} // namespace gl + +#include "Color.inc" + +#endif // COMMON_COLOR_H_ diff --git a/gfx/angle/checkout/src/common/Color.inc b/gfx/angle/checkout/src/common/Color.inc new file mode 100644 index 0000000000..3dc7b192a3 --- /dev/null +++ b/gfx/angle/checkout/src/common/Color.inc @@ -0,0 +1,69 @@ +// +// Copyright (c) 2016 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
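A minimal usage sketch of the Color<T> template and the ColorGeneric tagged union declared in Color.h above. The function name colorExample and the literal component values are illustrative only, not part of the ANGLE sources.

#include "common/Color.h"

void colorExample()
{
    // Construct a float color and write it out as a flat RGBA array.
    angle::ColorF clearColor(0.0f, 0.0f, 0.0f, 1.0f);
    float rgba[4];
    clearColor.writeData(rgba);  // rgba == {0, 0, 0, 1}

    // Rebuild a color from packed component data; operator== compares all four channels.
    angle::ColorF restored = angle::ColorF::fromData(rgba);
    bool same = (restored == clearColor);  // true

    // ColorGeneric stores one of ColorF/ColorI/ColorUI and tags which member is active.
    angle::ColorGeneric generic(angle::ColorUI(255u, 128u, 0u, 255u));
    // generic.type == angle::ColorGeneric::Type::UInt
    (void)same;
    (void)generic;
}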
+// + +// Color.inc : Inline definitions of some functions from Color.h + +namespace angle +{ + +template <typename T> +Color<T>::Color() : Color(0, 0, 0, 0) +{ +} + +template <typename T> +constexpr Color<T>::Color(T r, T g, T b, T a) : red(r), green(g), blue(b), alpha(a) +{ +} + +template <typename T> +bool operator==(const Color<T> &a, const Color<T> &b) +{ + return a.red == b.red && + a.green == b.green && + a.blue == b.blue && + a.alpha == b.alpha; +} + +template <typename T> +bool operator!=(const Color<T> &a, const Color<T> &b) +{ + return !(a == b); +} + + +ColorGeneric::ColorGeneric() : colorF(), type(Type::Float) {} + +ColorGeneric::ColorGeneric(const ColorF &color) : colorF(color), type(Type::Float) {} + +ColorGeneric::ColorGeneric(const ColorI &color) : colorI(color), type(Type::Int) {} + +ColorGeneric::ColorGeneric(const ColorUI &color) : colorUI(color), type(Type::UInt) {} + +bool operator==(const ColorGeneric &a, const ColorGeneric &b) +{ + if (a.type != b.type) + { + return false; + } + switch (a.type) + { + default: + case ColorGeneric::Type::Float: + return a.colorF == b.colorF; + case ColorGeneric::Type::Int: + return a.colorI == b.colorI; + case ColorGeneric::Type::UInt: + return a.colorUI == b.colorUI; + } +} + +bool operator!=(const ColorGeneric &a, const ColorGeneric &b) +{ + return !(a == b); +} + +} // namespace angle diff --git a/gfx/angle/checkout/src/common/FastVector.h b/gfx/angle/checkout/src/common/FastVector.h new file mode 100644 index 0000000000..7c86a288fc --- /dev/null +++ b/gfx/angle/checkout/src/common/FastVector.h @@ -0,0 +1,434 @@ +// +// Copyright 2018 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// FastVector.h: +// A vector class with a initial fixed size and variable growth. +// Based on FixedVector. 
+// + +#ifndef COMMON_FASTVECTOR_H_ +#define COMMON_FASTVECTOR_H_ + +#include "common/debug.h" + +#include <algorithm> +#include <array> +#include <initializer_list> + +namespace angle +{ +template <class T, size_t N, class Storage = std::array<T, N>> +class FastVector final +{ + public: + using value_type = typename Storage::value_type; + using size_type = typename Storage::size_type; + using reference = typename Storage::reference; + using const_reference = typename Storage::const_reference; + using pointer = typename Storage::pointer; + using const_pointer = typename Storage::const_pointer; + using iterator = T *; + using const_iterator = const T *; + + FastVector(); + FastVector(size_type count, const value_type &value); + FastVector(size_type count); + + FastVector(const FastVector<T, N, Storage> &other); + FastVector(FastVector<T, N, Storage> &&other); + FastVector(std::initializer_list<value_type> init); + + FastVector<T, N, Storage> &operator=(const FastVector<T, N, Storage> &other); + FastVector<T, N, Storage> &operator=(FastVector<T, N, Storage> &&other); + FastVector<T, N, Storage> &operator=(std::initializer_list<value_type> init); + + ~FastVector(); + + reference at(size_type pos); + const_reference at(size_type pos) const; + + reference operator[](size_type pos); + const_reference operator[](size_type pos) const; + + pointer data(); + const_pointer data() const; + + iterator begin(); + const_iterator begin() const; + + iterator end(); + const_iterator end() const; + + bool empty() const; + size_type size() const; + + void clear(); + + void push_back(const value_type &value); + void push_back(value_type &&value); + + void pop_back(); + + reference front(); + const_reference front() const; + + reference back(); + const_reference back() const; + + void swap(FastVector<T, N, Storage> &other); + + void resize(size_type count); + void resize(size_type count, const value_type &value); + + // Specialty function that removes a known element and might shuffle the list. 
+ void remove_and_permute(const value_type &element); + + private: + void assign_from_initializer_list(std::initializer_list<value_type> init); + void ensure_capacity(size_t capacity); + bool uses_fixed_storage() const; + + Storage mFixedStorage; + pointer mData = mFixedStorage.data(); + size_type mSize = 0; + size_type mReservedSize = N; +}; + +template <class T, size_t N, class StorageN, size_t M, class StorageM> +bool operator==(const FastVector<T, N, StorageN> &a, const FastVector<T, M, StorageM> &b) +{ + return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin()); +} + +template <class T, size_t N, class StorageN, size_t M, class StorageM> +bool operator!=(const FastVector<T, N, StorageN> &a, const FastVector<T, M, StorageM> &b) +{ + return !(a == b); +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE bool FastVector<T, N, Storage>::uses_fixed_storage() const +{ + return mData == mFixedStorage.data(); +} + +template <class T, size_t N, class Storage> +FastVector<T, N, Storage>::FastVector() +{} + +template <class T, size_t N, class Storage> +FastVector<T, N, Storage>::FastVector(size_type count, const value_type &value) +{ + ensure_capacity(count); + mSize = count; + std::fill(begin(), end(), value); +} + +template <class T, size_t N, class Storage> +FastVector<T, N, Storage>::FastVector(size_type count) +{ + ensure_capacity(count); + mSize = count; +} + +template <class T, size_t N, class Storage> +FastVector<T, N, Storage>::FastVector(const FastVector<T, N, Storage> &other) +{ + ensure_capacity(other.mSize); + mSize = other.mSize; + std::copy(other.begin(), other.end(), begin()); +} + +template <class T, size_t N, class Storage> +FastVector<T, N, Storage>::FastVector(FastVector<T, N, Storage> &&other) : FastVector() +{ + swap(other); +} + +template <class T, size_t N, class Storage> +FastVector<T, N, Storage>::FastVector(std::initializer_list<value_type> init) +{ + assign_from_initializer_list(init); +} + +template <class T, size_t N, class Storage> +FastVector<T, N, Storage> &FastVector<T, N, Storage>::operator=( + const FastVector<T, N, Storage> &other) +{ + ensure_capacity(other.mSize); + mSize = other.mSize; + std::copy(other.begin(), other.end(), begin()); + return *this; +} + +template <class T, size_t N, class Storage> +FastVector<T, N, Storage> &FastVector<T, N, Storage>::operator=(FastVector<T, N, Storage> &&other) +{ + swap(*this, other); + return *this; +} + +template <class T, size_t N, class Storage> +FastVector<T, N, Storage> &FastVector<T, N, Storage>::operator=( + std::initializer_list<value_type> init) +{ + assign_from_initializer_list(init); + return *this; +} + +template <class T, size_t N, class Storage> +FastVector<T, N, Storage>::~FastVector() +{ + clear(); + if (!uses_fixed_storage()) + { + delete[] mData; + } +} + +template <class T, size_t N, class Storage> +typename FastVector<T, N, Storage>::reference FastVector<T, N, Storage>::at(size_type pos) +{ + ASSERT(pos < mSize); + return mData[pos]; +} + +template <class T, size_t N, class Storage> +typename FastVector<T, N, Storage>::const_reference FastVector<T, N, Storage>::at( + size_type pos) const +{ + ASSERT(pos < mSize); + return mData[pos]; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::reference FastVector<T, N, Storage>::operator[]( + size_type pos) +{ + ASSERT(pos < mSize); + return mData[pos]; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE + typename FastVector<T, N, Storage>::const_reference FastVector<T, 
N, Storage>::operator[]( + size_type pos) const +{ + ASSERT(pos < mSize); + return mData[pos]; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::const_pointer +angle::FastVector<T, N, Storage>::data() const +{ + ASSERT(!empty()); + return mData; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::pointer angle::FastVector<T, N, Storage>::data() +{ + ASSERT(!empty()); + return mData; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::iterator FastVector<T, N, Storage>::begin() +{ + return mData; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::const_iterator FastVector<T, N, Storage>::begin() + const +{ + return mData; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::iterator FastVector<T, N, Storage>::end() +{ + return mData + mSize; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::const_iterator FastVector<T, N, Storage>::end() + const +{ + return mData + mSize; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE bool FastVector<T, N, Storage>::empty() const +{ + return mSize == 0; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::size_type FastVector<T, N, Storage>::size() const +{ + return mSize; +} + +template <class T, size_t N, class Storage> +void FastVector<T, N, Storage>::clear() +{ + resize(0); +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE void FastVector<T, N, Storage>::push_back(const value_type &value) +{ + if (mSize == mReservedSize) + ensure_capacity(mSize + 1); + mData[mSize++] = value; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE void FastVector<T, N, Storage>::push_back(value_type &&value) +{ + if (mSize == mReservedSize) + ensure_capacity(mSize + 1); + mData[mSize++] = std::move(value); +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE void FastVector<T, N, Storage>::pop_back() +{ + ASSERT(mSize > 0); + mSize--; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::reference FastVector<T, N, Storage>::front() +{ + ASSERT(mSize > 0); + return mData[0]; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::const_reference FastVector<T, N, Storage>::front() + const +{ + ASSERT(mSize > 0); + return mData[0]; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::reference FastVector<T, N, Storage>::back() +{ + ASSERT(mSize > 0); + return mData[mSize - 1]; +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE typename FastVector<T, N, Storage>::const_reference FastVector<T, N, Storage>::back() + const +{ + ASSERT(mSize > 0); + return mData[mSize - 1]; +} + +template <class T, size_t N, class Storage> +void FastVector<T, N, Storage>::swap(FastVector<T, N, Storage> &other) +{ + std::swap(mSize, other.mSize); + + pointer tempData = other.mData; + if (uses_fixed_storage()) + other.mData = other.mFixedStorage.data(); + else + other.mData = mData; + if (tempData == other.mFixedStorage.data()) + mData = mFixedStorage.data(); + else + mData = tempData; + std::swap(mReservedSize, other.mReservedSize); + + if (uses_fixed_storage() || other.uses_fixed_storage()) + std::swap(mFixedStorage, other.mFixedStorage); +} + +template <class T, 
size_t N, class Storage> +void FastVector<T, N, Storage>::resize(size_type count) +{ + if (count > mSize) + { + ensure_capacity(count); + } + mSize = count; +} + +template <class T, size_t N, class Storage> +void FastVector<T, N, Storage>::resize(size_type count, const value_type &value) +{ + if (count > mSize) + { + ensure_capacity(count); + std::fill(mData + mSize, mData + count, value); + } + mSize = count; +} + +template <class T, size_t N, class Storage> +void FastVector<T, N, Storage>::assign_from_initializer_list(std::initializer_list<value_type> init) +{ + ensure_capacity(init.size()); + mSize = init.size(); + size_t index = 0; + for (auto &value : init) + { + mData[index++] = value; + } +} + +template <class T, size_t N, class Storage> +ANGLE_INLINE void FastVector<T, N, Storage>::remove_and_permute(const value_type &element) +{ + size_t len = mSize - 1; + for (size_t index = 0; index < len; ++index) + { + if (mData[index] == element) + { + mData[index] = std::move(mData[len]); + break; + } + } + pop_back(); +} + +template <class T, size_t N, class Storage> +void FastVector<T, N, Storage>::ensure_capacity(size_t capacity) +{ + // We have a minimum capacity of N. + if (mReservedSize < capacity) + { + ASSERT(capacity > N); + size_type newSize = std::max(mReservedSize, N); + while (newSize < capacity) + { + newSize *= 2; + } + + pointer newData = new value_type[newSize]; + + if (mSize > 0) + { + std::move(begin(), end(), newData); + } + + if (!uses_fixed_storage()) + { + delete[] mData; + } + + mData = newData; + mReservedSize = newSize; + } +} +} // namespace angle + +#endif // COMMON_FASTVECTOR_H_ diff --git a/gfx/angle/checkout/src/common/FixedVector.h b/gfx/angle/checkout/src/common/FixedVector.h new file mode 100644 index 0000000000..f2c4748d5d --- /dev/null +++ b/gfx/angle/checkout/src/common/FixedVector.h @@ -0,0 +1,341 @@ +// +// Copyright 2018 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// FixedVector.h: +// A vector class with a maximum size and fixed storage. 
+// + +#ifndef COMMON_FIXEDVECTOR_H_ +#define COMMON_FIXEDVECTOR_H_ + +#include "common/debug.h" + +#include <algorithm> +#include <array> +#include <initializer_list> + +namespace angle +{ +template <class T, size_t N, class Storage = std::array<T, N>> +class FixedVector final +{ + public: + using value_type = typename Storage::value_type; + using size_type = typename Storage::size_type; + using reference = typename Storage::reference; + using const_reference = typename Storage::const_reference; + using pointer = typename Storage::pointer; + using const_pointer = typename Storage::const_pointer; + using iterator = typename Storage::iterator; + using const_iterator = typename Storage::const_iterator; + using reverse_iterator = typename Storage::reverse_iterator; + using const_reverse_iterator = typename Storage::const_reverse_iterator; + + FixedVector(); + FixedVector(size_type count, const value_type &value); + FixedVector(size_type count); + + FixedVector(const FixedVector<T, N, Storage> &other); + FixedVector(FixedVector<T, N, Storage> &&other); + FixedVector(std::initializer_list<value_type> init); + + FixedVector<T, N, Storage> &operator=(const FixedVector<T, N, Storage> &other); + FixedVector<T, N, Storage> &operator=(FixedVector<T, N, Storage> &&other); + FixedVector<T, N, Storage> &operator=(std::initializer_list<value_type> init); + + ~FixedVector(); + + reference at(size_type pos); + const_reference at(size_type pos) const; + + reference operator[](size_type pos); + const_reference operator[](size_type pos) const; + + pointer data(); + const_pointer data() const; + + iterator begin(); + const_iterator begin() const; + + iterator end(); + const_iterator end() const; + + bool empty() const; + size_type size() const; + static constexpr size_type max_size(); + + void clear(); + + void push_back(const value_type &value); + void push_back(value_type &&value); + + void pop_back(); + reference back(); + const_reference back() const; + + void swap(FixedVector<T, N, Storage> &other); + + void resize(size_type count); + void resize(size_type count, const value_type &value); + + bool full() const; + + private: + void assign_from_initializer_list(std::initializer_list<value_type> init); + + Storage mStorage; + size_type mSize = 0; +}; + +template <class T, size_t N, class Storage> +bool operator==(const FixedVector<T, N, Storage> &a, const FixedVector<T, N, Storage> &b) +{ + return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin()); +} + +template <class T, size_t N, class Storage> +bool operator!=(const FixedVector<T, N, Storage> &a, const FixedVector<T, N, Storage> &b) +{ + return !(a == b); +} + +template <class T, size_t N, class Storage> +FixedVector<T, N, Storage>::FixedVector() = default; + +template <class T, size_t N, class Storage> +FixedVector<T, N, Storage>::FixedVector(size_type count, const value_type &value) : mSize(count) +{ + ASSERT(count <= N); + std::fill(mStorage.begin(), mStorage.begin() + count, value); +} + +template <class T, size_t N, class Storage> +FixedVector<T, N, Storage>::FixedVector(size_type count) : mSize(count) +{ + ASSERT(count <= N); +} + +template <class T, size_t N, class Storage> +FixedVector<T, N, Storage>::FixedVector(const FixedVector<T, N, Storage> &other) = default; + +template <class T, size_t N, class Storage> +FixedVector<T, N, Storage>::FixedVector(FixedVector<T, N, Storage> &&other) = default; + +template <class T, size_t N, class Storage> +FixedVector<T, N, Storage>::FixedVector(std::initializer_list<value_type> init) +{ + 
ASSERT(init.size() <= N); + assign_from_initializer_list(init); +} + +template <class T, size_t N, class Storage> +FixedVector<T, N, Storage> &FixedVector<T, N, Storage>::operator=( + const FixedVector<T, N, Storage> &other) = default; + +template <class T, size_t N, class Storage> +FixedVector<T, N, Storage> &FixedVector<T, N, Storage>::operator=( + FixedVector<T, N, Storage> &&other) = default; + +template <class T, size_t N, class Storage> +FixedVector<T, N, Storage> &FixedVector<T, N, Storage>::operator=( + std::initializer_list<value_type> init) +{ + clear(); + ASSERT(init.size() <= N); + assign_from_initializer_list(init); + return this; +} + +template <class T, size_t N, class Storage> +FixedVector<T, N, Storage>::~FixedVector() +{ + clear(); +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::reference FixedVector<T, N, Storage>::at(size_type pos) +{ + ASSERT(pos < N); + return mStorage.at(pos); +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::const_reference FixedVector<T, N, Storage>::at( + size_type pos) const +{ + ASSERT(pos < N); + return mStorage.at(pos); +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::reference FixedVector<T, N, Storage>::operator[](size_type pos) +{ + ASSERT(pos < N); + return mStorage[pos]; +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::const_reference FixedVector<T, N, Storage>::operator[]( + size_type pos) const +{ + ASSERT(pos < N); + return mStorage[pos]; +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::const_pointer angle::FixedVector<T, N, Storage>::data() const +{ + return mStorage.data(); +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::pointer angle::FixedVector<T, N, Storage>::data() +{ + return mStorage.data(); +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::iterator FixedVector<T, N, Storage>::begin() +{ + return mStorage.begin(); +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::const_iterator FixedVector<T, N, Storage>::begin() const +{ + return mStorage.begin(); +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::iterator FixedVector<T, N, Storage>::end() +{ + return mStorage.begin() + mSize; +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::const_iterator FixedVector<T, N, Storage>::end() const +{ + return mStorage.begin() + mSize; +} + +template <class T, size_t N, class Storage> +bool FixedVector<T, N, Storage>::empty() const +{ + return mSize == 0; +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::size_type FixedVector<T, N, Storage>::size() const +{ + return mSize; +} + +template <class T, size_t N, class Storage> +constexpr typename FixedVector<T, N, Storage>::size_type FixedVector<T, N, Storage>::max_size() +{ + return N; +} + +template <class T, size_t N, class Storage> +void FixedVector<T, N, Storage>::clear() +{ + resize(0); +} + +template <class T, size_t N, class Storage> +void FixedVector<T, N, Storage>::push_back(const value_type &value) +{ + ASSERT(mSize < N); + mStorage[mSize] = value; + mSize++; +} + +template <class T, size_t N, class Storage> +void FixedVector<T, N, Storage>::push_back(value_type &&value) +{ + ASSERT(mSize < N); + mStorage[mSize] = std::move(value); + mSize++; +} + +template <class T, 
size_t N, class Storage> +void FixedVector<T, N, Storage>::pop_back() +{ + ASSERT(mSize > 0); + mSize--; +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::reference FixedVector<T, N, Storage>::back() +{ + ASSERT(mSize > 0); + return mStorage[mSize - 1]; +} + +template <class T, size_t N, class Storage> +typename FixedVector<T, N, Storage>::const_reference FixedVector<T, N, Storage>::back() const +{ + ASSERT(mSize > 0); + return mStorage[mSize - 1]; +} + +template <class T, size_t N, class Storage> +void FixedVector<T, N, Storage>::swap(FixedVector<T, N, Storage> &other) +{ + std::swap(mSize, other.mSize); + std::swap(mStorage, other.mStorage); +} + +template <class T, size_t N, class Storage> +void FixedVector<T, N, Storage>::resize(size_type count) +{ + ASSERT(count <= N); + while (mSize > count) + { + mSize--; + mStorage[mSize] = value_type(); + } + while (mSize < count) + { + mStorage[mSize] = value_type(); + mSize++; + } +} + +template <class T, size_t N, class Storage> +void FixedVector<T, N, Storage>::resize(size_type count, const value_type &value) +{ + ASSERT(count <= N); + while (mSize > count) + { + mSize--; + mStorage[mSize] = value_type(); + } + while (mSize < count) + { + mStorage[mSize] = value; + mSize++; + } +} + +template <class T, size_t N, class Storage> +void FixedVector<T, N, Storage>::assign_from_initializer_list( + std::initializer_list<value_type> init) +{ + for (auto element : init) + { + mStorage[mSize] = std::move(element); + mSize++; + } +} + +template <class T, size_t N, class Storage> +bool FixedVector<T, N, Storage>::full() const +{ + return (mSize == N); +} +} // namespace angle + +#endif // COMMON_FIXEDVECTOR_H_ diff --git a/gfx/angle/checkout/src/common/Float16ToFloat32.cpp b/gfx/angle/checkout/src/common/Float16ToFloat32.cpp new file mode 100644 index 0000000000..6c21987704 --- /dev/null +++ b/gfx/angle/checkout/src/common/Float16ToFloat32.cpp @@ -0,0 +1,300 @@ +// +// Copyright (c) 2012 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// This file is automatically generated. 
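A short, hypothetical sketch contrasting the two containers defined above: FastVector<T, N> keeps its first N elements in inline storage and grows onto the heap when that is exceeded, while FixedVector<T, N> never allocates and asserts (in debug builds) if its capacity is exceeded. The function name vectorExample and the element values are illustrative only.

#include "common/FastVector.h"
#include "common/FixedVector.h"

void vectorExample()
{
    // FastVector keeps the first 4 elements in fixed storage and reallocates
    // on the heap once a fifth element is pushed (capacity doubles as needed).
    angle::FastVector<int, 4> fast = {1, 2, 3, 4};
    fast.push_back(5);
    fast.remove_and_permute(2);  // removes 2; may move the last element into its slot

    // FixedVector never allocates; full() reports when max_size() is reached.
    angle::FixedVector<int, 4> fixed;
    while (!fixed.full())
    {
        fixed.push_back(static_cast<int>(fixed.size()));
    }
    // fixed.size() == fixed.max_size() == 4
}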
+ +#include "common/mathutil.h" + +namespace gl +{ + +const static unsigned g_mantissa[2048] = { + 0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34a00000, 0x34c00000, 0x34e00000, + 0x35000000, 0x35100000, 0x35200000, 0x35300000, 0x35400000, 0x35500000, 0x35600000, 0x35700000, + 0x35800000, 0x35880000, 0x35900000, 0x35980000, 0x35a00000, 0x35a80000, 0x35b00000, 0x35b80000, + 0x35c00000, 0x35c80000, 0x35d00000, 0x35d80000, 0x35e00000, 0x35e80000, 0x35f00000, 0x35f80000, + 0x36000000, 0x36040000, 0x36080000, 0x360c0000, 0x36100000, 0x36140000, 0x36180000, 0x361c0000, + 0x36200000, 0x36240000, 0x36280000, 0x362c0000, 0x36300000, 0x36340000, 0x36380000, 0x363c0000, + 0x36400000, 0x36440000, 0x36480000, 0x364c0000, 0x36500000, 0x36540000, 0x36580000, 0x365c0000, + 0x36600000, 0x36640000, 0x36680000, 0x366c0000, 0x36700000, 0x36740000, 0x36780000, 0x367c0000, + 0x36800000, 0x36820000, 0x36840000, 0x36860000, 0x36880000, 0x368a0000, 0x368c0000, 0x368e0000, + 0x36900000, 0x36920000, 0x36940000, 0x36960000, 0x36980000, 0x369a0000, 0x369c0000, 0x369e0000, + 0x36a00000, 0x36a20000, 0x36a40000, 0x36a60000, 0x36a80000, 0x36aa0000, 0x36ac0000, 0x36ae0000, + 0x36b00000, 0x36b20000, 0x36b40000, 0x36b60000, 0x36b80000, 0x36ba0000, 0x36bc0000, 0x36be0000, + 0x36c00000, 0x36c20000, 0x36c40000, 0x36c60000, 0x36c80000, 0x36ca0000, 0x36cc0000, 0x36ce0000, + 0x36d00000, 0x36d20000, 0x36d40000, 0x36d60000, 0x36d80000, 0x36da0000, 0x36dc0000, 0x36de0000, + 0x36e00000, 0x36e20000, 0x36e40000, 0x36e60000, 0x36e80000, 0x36ea0000, 0x36ec0000, 0x36ee0000, + 0x36f00000, 0x36f20000, 0x36f40000, 0x36f60000, 0x36f80000, 0x36fa0000, 0x36fc0000, 0x36fe0000, + 0x37000000, 0x37010000, 0x37020000, 0x37030000, 0x37040000, 0x37050000, 0x37060000, 0x37070000, + 0x37080000, 0x37090000, 0x370a0000, 0x370b0000, 0x370c0000, 0x370d0000, 0x370e0000, 0x370f0000, + 0x37100000, 0x37110000, 0x37120000, 0x37130000, 0x37140000, 0x37150000, 0x37160000, 0x37170000, + 0x37180000, 0x37190000, 0x371a0000, 0x371b0000, 0x371c0000, 0x371d0000, 0x371e0000, 0x371f0000, + 0x37200000, 0x37210000, 0x37220000, 0x37230000, 0x37240000, 0x37250000, 0x37260000, 0x37270000, + 0x37280000, 0x37290000, 0x372a0000, 0x372b0000, 0x372c0000, 0x372d0000, 0x372e0000, 0x372f0000, + 0x37300000, 0x37310000, 0x37320000, 0x37330000, 0x37340000, 0x37350000, 0x37360000, 0x37370000, + 0x37380000, 0x37390000, 0x373a0000, 0x373b0000, 0x373c0000, 0x373d0000, 0x373e0000, 0x373f0000, + 0x37400000, 0x37410000, 0x37420000, 0x37430000, 0x37440000, 0x37450000, 0x37460000, 0x37470000, + 0x37480000, 0x37490000, 0x374a0000, 0x374b0000, 0x374c0000, 0x374d0000, 0x374e0000, 0x374f0000, + 0x37500000, 0x37510000, 0x37520000, 0x37530000, 0x37540000, 0x37550000, 0x37560000, 0x37570000, + 0x37580000, 0x37590000, 0x375a0000, 0x375b0000, 0x375c0000, 0x375d0000, 0x375e0000, 0x375f0000, + 0x37600000, 0x37610000, 0x37620000, 0x37630000, 0x37640000, 0x37650000, 0x37660000, 0x37670000, + 0x37680000, 0x37690000, 0x376a0000, 0x376b0000, 0x376c0000, 0x376d0000, 0x376e0000, 0x376f0000, + 0x37700000, 0x37710000, 0x37720000, 0x37730000, 0x37740000, 0x37750000, 0x37760000, 0x37770000, + 0x37780000, 0x37790000, 0x377a0000, 0x377b0000, 0x377c0000, 0x377d0000, 0x377e0000, 0x377f0000, + 0x37800000, 0x37808000, 0x37810000, 0x37818000, 0x37820000, 0x37828000, 0x37830000, 0x37838000, + 0x37840000, 0x37848000, 0x37850000, 0x37858000, 0x37860000, 0x37868000, 0x37870000, 0x37878000, + 0x37880000, 0x37888000, 0x37890000, 0x37898000, 0x378a0000, 0x378a8000, 0x378b0000, 0x378b8000, + 0x378c0000, 0x378c8000, 
0x378d0000, 0x378d8000, 0x378e0000, 0x378e8000, 0x378f0000, 0x378f8000, + 0x37900000, 0x37908000, 0x37910000, 0x37918000, 0x37920000, 0x37928000, 0x37930000, 0x37938000, + 0x37940000, 0x37948000, 0x37950000, 0x37958000, 0x37960000, 0x37968000, 0x37970000, 0x37978000, + 0x37980000, 0x37988000, 0x37990000, 0x37998000, 0x379a0000, 0x379a8000, 0x379b0000, 0x379b8000, + 0x379c0000, 0x379c8000, 0x379d0000, 0x379d8000, 0x379e0000, 0x379e8000, 0x379f0000, 0x379f8000, + 0x37a00000, 0x37a08000, 0x37a10000, 0x37a18000, 0x37a20000, 0x37a28000, 0x37a30000, 0x37a38000, + 0x37a40000, 0x37a48000, 0x37a50000, 0x37a58000, 0x37a60000, 0x37a68000, 0x37a70000, 0x37a78000, + 0x37a80000, 0x37a88000, 0x37a90000, 0x37a98000, 0x37aa0000, 0x37aa8000, 0x37ab0000, 0x37ab8000, + 0x37ac0000, 0x37ac8000, 0x37ad0000, 0x37ad8000, 0x37ae0000, 0x37ae8000, 0x37af0000, 0x37af8000, + 0x37b00000, 0x37b08000, 0x37b10000, 0x37b18000, 0x37b20000, 0x37b28000, 0x37b30000, 0x37b38000, + 0x37b40000, 0x37b48000, 0x37b50000, 0x37b58000, 0x37b60000, 0x37b68000, 0x37b70000, 0x37b78000, + 0x37b80000, 0x37b88000, 0x37b90000, 0x37b98000, 0x37ba0000, 0x37ba8000, 0x37bb0000, 0x37bb8000, + 0x37bc0000, 0x37bc8000, 0x37bd0000, 0x37bd8000, 0x37be0000, 0x37be8000, 0x37bf0000, 0x37bf8000, + 0x37c00000, 0x37c08000, 0x37c10000, 0x37c18000, 0x37c20000, 0x37c28000, 0x37c30000, 0x37c38000, + 0x37c40000, 0x37c48000, 0x37c50000, 0x37c58000, 0x37c60000, 0x37c68000, 0x37c70000, 0x37c78000, + 0x37c80000, 0x37c88000, 0x37c90000, 0x37c98000, 0x37ca0000, 0x37ca8000, 0x37cb0000, 0x37cb8000, + 0x37cc0000, 0x37cc8000, 0x37cd0000, 0x37cd8000, 0x37ce0000, 0x37ce8000, 0x37cf0000, 0x37cf8000, + 0x37d00000, 0x37d08000, 0x37d10000, 0x37d18000, 0x37d20000, 0x37d28000, 0x37d30000, 0x37d38000, + 0x37d40000, 0x37d48000, 0x37d50000, 0x37d58000, 0x37d60000, 0x37d68000, 0x37d70000, 0x37d78000, + 0x37d80000, 0x37d88000, 0x37d90000, 0x37d98000, 0x37da0000, 0x37da8000, 0x37db0000, 0x37db8000, + 0x37dc0000, 0x37dc8000, 0x37dd0000, 0x37dd8000, 0x37de0000, 0x37de8000, 0x37df0000, 0x37df8000, + 0x37e00000, 0x37e08000, 0x37e10000, 0x37e18000, 0x37e20000, 0x37e28000, 0x37e30000, 0x37e38000, + 0x37e40000, 0x37e48000, 0x37e50000, 0x37e58000, 0x37e60000, 0x37e68000, 0x37e70000, 0x37e78000, + 0x37e80000, 0x37e88000, 0x37e90000, 0x37e98000, 0x37ea0000, 0x37ea8000, 0x37eb0000, 0x37eb8000, + 0x37ec0000, 0x37ec8000, 0x37ed0000, 0x37ed8000, 0x37ee0000, 0x37ee8000, 0x37ef0000, 0x37ef8000, + 0x37f00000, 0x37f08000, 0x37f10000, 0x37f18000, 0x37f20000, 0x37f28000, 0x37f30000, 0x37f38000, + 0x37f40000, 0x37f48000, 0x37f50000, 0x37f58000, 0x37f60000, 0x37f68000, 0x37f70000, 0x37f78000, + 0x37f80000, 0x37f88000, 0x37f90000, 0x37f98000, 0x37fa0000, 0x37fa8000, 0x37fb0000, 0x37fb8000, + 0x37fc0000, 0x37fc8000, 0x37fd0000, 0x37fd8000, 0x37fe0000, 0x37fe8000, 0x37ff0000, 0x37ff8000, + 0x38000000, 0x38004000, 0x38008000, 0x3800c000, 0x38010000, 0x38014000, 0x38018000, 0x3801c000, + 0x38020000, 0x38024000, 0x38028000, 0x3802c000, 0x38030000, 0x38034000, 0x38038000, 0x3803c000, + 0x38040000, 0x38044000, 0x38048000, 0x3804c000, 0x38050000, 0x38054000, 0x38058000, 0x3805c000, + 0x38060000, 0x38064000, 0x38068000, 0x3806c000, 0x38070000, 0x38074000, 0x38078000, 0x3807c000, + 0x38080000, 0x38084000, 0x38088000, 0x3808c000, 0x38090000, 0x38094000, 0x38098000, 0x3809c000, + 0x380a0000, 0x380a4000, 0x380a8000, 0x380ac000, 0x380b0000, 0x380b4000, 0x380b8000, 0x380bc000, + 0x380c0000, 0x380c4000, 0x380c8000, 0x380cc000, 0x380d0000, 0x380d4000, 0x380d8000, 0x380dc000, + 0x380e0000, 0x380e4000, 0x380e8000, 0x380ec000, 
0x380f0000, 0x380f4000, 0x380f8000, 0x380fc000, + 0x38100000, 0x38104000, 0x38108000, 0x3810c000, 0x38110000, 0x38114000, 0x38118000, 0x3811c000, + 0x38120000, 0x38124000, 0x38128000, 0x3812c000, 0x38130000, 0x38134000, 0x38138000, 0x3813c000, + 0x38140000, 0x38144000, 0x38148000, 0x3814c000, 0x38150000, 0x38154000, 0x38158000, 0x3815c000, + 0x38160000, 0x38164000, 0x38168000, 0x3816c000, 0x38170000, 0x38174000, 0x38178000, 0x3817c000, + 0x38180000, 0x38184000, 0x38188000, 0x3818c000, 0x38190000, 0x38194000, 0x38198000, 0x3819c000, + 0x381a0000, 0x381a4000, 0x381a8000, 0x381ac000, 0x381b0000, 0x381b4000, 0x381b8000, 0x381bc000, + 0x381c0000, 0x381c4000, 0x381c8000, 0x381cc000, 0x381d0000, 0x381d4000, 0x381d8000, 0x381dc000, + 0x381e0000, 0x381e4000, 0x381e8000, 0x381ec000, 0x381f0000, 0x381f4000, 0x381f8000, 0x381fc000, + 0x38200000, 0x38204000, 0x38208000, 0x3820c000, 0x38210000, 0x38214000, 0x38218000, 0x3821c000, + 0x38220000, 0x38224000, 0x38228000, 0x3822c000, 0x38230000, 0x38234000, 0x38238000, 0x3823c000, + 0x38240000, 0x38244000, 0x38248000, 0x3824c000, 0x38250000, 0x38254000, 0x38258000, 0x3825c000, + 0x38260000, 0x38264000, 0x38268000, 0x3826c000, 0x38270000, 0x38274000, 0x38278000, 0x3827c000, + 0x38280000, 0x38284000, 0x38288000, 0x3828c000, 0x38290000, 0x38294000, 0x38298000, 0x3829c000, + 0x382a0000, 0x382a4000, 0x382a8000, 0x382ac000, 0x382b0000, 0x382b4000, 0x382b8000, 0x382bc000, + 0x382c0000, 0x382c4000, 0x382c8000, 0x382cc000, 0x382d0000, 0x382d4000, 0x382d8000, 0x382dc000, + 0x382e0000, 0x382e4000, 0x382e8000, 0x382ec000, 0x382f0000, 0x382f4000, 0x382f8000, 0x382fc000, + 0x38300000, 0x38304000, 0x38308000, 0x3830c000, 0x38310000, 0x38314000, 0x38318000, 0x3831c000, + 0x38320000, 0x38324000, 0x38328000, 0x3832c000, 0x38330000, 0x38334000, 0x38338000, 0x3833c000, + 0x38340000, 0x38344000, 0x38348000, 0x3834c000, 0x38350000, 0x38354000, 0x38358000, 0x3835c000, + 0x38360000, 0x38364000, 0x38368000, 0x3836c000, 0x38370000, 0x38374000, 0x38378000, 0x3837c000, + 0x38380000, 0x38384000, 0x38388000, 0x3838c000, 0x38390000, 0x38394000, 0x38398000, 0x3839c000, + 0x383a0000, 0x383a4000, 0x383a8000, 0x383ac000, 0x383b0000, 0x383b4000, 0x383b8000, 0x383bc000, + 0x383c0000, 0x383c4000, 0x383c8000, 0x383cc000, 0x383d0000, 0x383d4000, 0x383d8000, 0x383dc000, + 0x383e0000, 0x383e4000, 0x383e8000, 0x383ec000, 0x383f0000, 0x383f4000, 0x383f8000, 0x383fc000, + 0x38400000, 0x38404000, 0x38408000, 0x3840c000, 0x38410000, 0x38414000, 0x38418000, 0x3841c000, + 0x38420000, 0x38424000, 0x38428000, 0x3842c000, 0x38430000, 0x38434000, 0x38438000, 0x3843c000, + 0x38440000, 0x38444000, 0x38448000, 0x3844c000, 0x38450000, 0x38454000, 0x38458000, 0x3845c000, + 0x38460000, 0x38464000, 0x38468000, 0x3846c000, 0x38470000, 0x38474000, 0x38478000, 0x3847c000, + 0x38480000, 0x38484000, 0x38488000, 0x3848c000, 0x38490000, 0x38494000, 0x38498000, 0x3849c000, + 0x384a0000, 0x384a4000, 0x384a8000, 0x384ac000, 0x384b0000, 0x384b4000, 0x384b8000, 0x384bc000, + 0x384c0000, 0x384c4000, 0x384c8000, 0x384cc000, 0x384d0000, 0x384d4000, 0x384d8000, 0x384dc000, + 0x384e0000, 0x384e4000, 0x384e8000, 0x384ec000, 0x384f0000, 0x384f4000, 0x384f8000, 0x384fc000, + 0x38500000, 0x38504000, 0x38508000, 0x3850c000, 0x38510000, 0x38514000, 0x38518000, 0x3851c000, + 0x38520000, 0x38524000, 0x38528000, 0x3852c000, 0x38530000, 0x38534000, 0x38538000, 0x3853c000, + 0x38540000, 0x38544000, 0x38548000, 0x3854c000, 0x38550000, 0x38554000, 0x38558000, 0x3855c000, + 0x38560000, 0x38564000, 0x38568000, 0x3856c000, 0x38570000, 0x38574000, 
0x38578000, 0x3857c000, + 0x38580000, 0x38584000, 0x38588000, 0x3858c000, 0x38590000, 0x38594000, 0x38598000, 0x3859c000, + 0x385a0000, 0x385a4000, 0x385a8000, 0x385ac000, 0x385b0000, 0x385b4000, 0x385b8000, 0x385bc000, + 0x385c0000, 0x385c4000, 0x385c8000, 0x385cc000, 0x385d0000, 0x385d4000, 0x385d8000, 0x385dc000, + 0x385e0000, 0x385e4000, 0x385e8000, 0x385ec000, 0x385f0000, 0x385f4000, 0x385f8000, 0x385fc000, + 0x38600000, 0x38604000, 0x38608000, 0x3860c000, 0x38610000, 0x38614000, 0x38618000, 0x3861c000, + 0x38620000, 0x38624000, 0x38628000, 0x3862c000, 0x38630000, 0x38634000, 0x38638000, 0x3863c000, + 0x38640000, 0x38644000, 0x38648000, 0x3864c000, 0x38650000, 0x38654000, 0x38658000, 0x3865c000, + 0x38660000, 0x38664000, 0x38668000, 0x3866c000, 0x38670000, 0x38674000, 0x38678000, 0x3867c000, + 0x38680000, 0x38684000, 0x38688000, 0x3868c000, 0x38690000, 0x38694000, 0x38698000, 0x3869c000, + 0x386a0000, 0x386a4000, 0x386a8000, 0x386ac000, 0x386b0000, 0x386b4000, 0x386b8000, 0x386bc000, + 0x386c0000, 0x386c4000, 0x386c8000, 0x386cc000, 0x386d0000, 0x386d4000, 0x386d8000, 0x386dc000, + 0x386e0000, 0x386e4000, 0x386e8000, 0x386ec000, 0x386f0000, 0x386f4000, 0x386f8000, 0x386fc000, + 0x38700000, 0x38704000, 0x38708000, 0x3870c000, 0x38710000, 0x38714000, 0x38718000, 0x3871c000, + 0x38720000, 0x38724000, 0x38728000, 0x3872c000, 0x38730000, 0x38734000, 0x38738000, 0x3873c000, + 0x38740000, 0x38744000, 0x38748000, 0x3874c000, 0x38750000, 0x38754000, 0x38758000, 0x3875c000, + 0x38760000, 0x38764000, 0x38768000, 0x3876c000, 0x38770000, 0x38774000, 0x38778000, 0x3877c000, + 0x38780000, 0x38784000, 0x38788000, 0x3878c000, 0x38790000, 0x38794000, 0x38798000, 0x3879c000, + 0x387a0000, 0x387a4000, 0x387a8000, 0x387ac000, 0x387b0000, 0x387b4000, 0x387b8000, 0x387bc000, + 0x387c0000, 0x387c4000, 0x387c8000, 0x387cc000, 0x387d0000, 0x387d4000, 0x387d8000, 0x387dc000, + 0x387e0000, 0x387e4000, 0x387e8000, 0x387ec000, 0x387f0000, 0x387f4000, 0x387f8000, 0x387fc000, + 0x38000000, 0x38002000, 0x38004000, 0x38006000, 0x38008000, 0x3800a000, 0x3800c000, 0x3800e000, + 0x38010000, 0x38012000, 0x38014000, 0x38016000, 0x38018000, 0x3801a000, 0x3801c000, 0x3801e000, + 0x38020000, 0x38022000, 0x38024000, 0x38026000, 0x38028000, 0x3802a000, 0x3802c000, 0x3802e000, + 0x38030000, 0x38032000, 0x38034000, 0x38036000, 0x38038000, 0x3803a000, 0x3803c000, 0x3803e000, + 0x38040000, 0x38042000, 0x38044000, 0x38046000, 0x38048000, 0x3804a000, 0x3804c000, 0x3804e000, + 0x38050000, 0x38052000, 0x38054000, 0x38056000, 0x38058000, 0x3805a000, 0x3805c000, 0x3805e000, + 0x38060000, 0x38062000, 0x38064000, 0x38066000, 0x38068000, 0x3806a000, 0x3806c000, 0x3806e000, + 0x38070000, 0x38072000, 0x38074000, 0x38076000, 0x38078000, 0x3807a000, 0x3807c000, 0x3807e000, + 0x38080000, 0x38082000, 0x38084000, 0x38086000, 0x38088000, 0x3808a000, 0x3808c000, 0x3808e000, + 0x38090000, 0x38092000, 0x38094000, 0x38096000, 0x38098000, 0x3809a000, 0x3809c000, 0x3809e000, + 0x380a0000, 0x380a2000, 0x380a4000, 0x380a6000, 0x380a8000, 0x380aa000, 0x380ac000, 0x380ae000, + 0x380b0000, 0x380b2000, 0x380b4000, 0x380b6000, 0x380b8000, 0x380ba000, 0x380bc000, 0x380be000, + 0x380c0000, 0x380c2000, 0x380c4000, 0x380c6000, 0x380c8000, 0x380ca000, 0x380cc000, 0x380ce000, + 0x380d0000, 0x380d2000, 0x380d4000, 0x380d6000, 0x380d8000, 0x380da000, 0x380dc000, 0x380de000, + 0x380e0000, 0x380e2000, 0x380e4000, 0x380e6000, 0x380e8000, 0x380ea000, 0x380ec000, 0x380ee000, + 0x380f0000, 0x380f2000, 0x380f4000, 0x380f6000, 0x380f8000, 0x380fa000, 0x380fc000, 0x380fe000, + 
0x38100000, 0x38102000, 0x38104000, 0x38106000, 0x38108000, 0x3810a000, 0x3810c000, 0x3810e000, + 0x38110000, 0x38112000, 0x38114000, 0x38116000, 0x38118000, 0x3811a000, 0x3811c000, 0x3811e000, + 0x38120000, 0x38122000, 0x38124000, 0x38126000, 0x38128000, 0x3812a000, 0x3812c000, 0x3812e000, + 0x38130000, 0x38132000, 0x38134000, 0x38136000, 0x38138000, 0x3813a000, 0x3813c000, 0x3813e000, + 0x38140000, 0x38142000, 0x38144000, 0x38146000, 0x38148000, 0x3814a000, 0x3814c000, 0x3814e000, + 0x38150000, 0x38152000, 0x38154000, 0x38156000, 0x38158000, 0x3815a000, 0x3815c000, 0x3815e000, + 0x38160000, 0x38162000, 0x38164000, 0x38166000, 0x38168000, 0x3816a000, 0x3816c000, 0x3816e000, + 0x38170000, 0x38172000, 0x38174000, 0x38176000, 0x38178000, 0x3817a000, 0x3817c000, 0x3817e000, + 0x38180000, 0x38182000, 0x38184000, 0x38186000, 0x38188000, 0x3818a000, 0x3818c000, 0x3818e000, + 0x38190000, 0x38192000, 0x38194000, 0x38196000, 0x38198000, 0x3819a000, 0x3819c000, 0x3819e000, + 0x381a0000, 0x381a2000, 0x381a4000, 0x381a6000, 0x381a8000, 0x381aa000, 0x381ac000, 0x381ae000, + 0x381b0000, 0x381b2000, 0x381b4000, 0x381b6000, 0x381b8000, 0x381ba000, 0x381bc000, 0x381be000, + 0x381c0000, 0x381c2000, 0x381c4000, 0x381c6000, 0x381c8000, 0x381ca000, 0x381cc000, 0x381ce000, + 0x381d0000, 0x381d2000, 0x381d4000, 0x381d6000, 0x381d8000, 0x381da000, 0x381dc000, 0x381de000, + 0x381e0000, 0x381e2000, 0x381e4000, 0x381e6000, 0x381e8000, 0x381ea000, 0x381ec000, 0x381ee000, + 0x381f0000, 0x381f2000, 0x381f4000, 0x381f6000, 0x381f8000, 0x381fa000, 0x381fc000, 0x381fe000, + 0x38200000, 0x38202000, 0x38204000, 0x38206000, 0x38208000, 0x3820a000, 0x3820c000, 0x3820e000, + 0x38210000, 0x38212000, 0x38214000, 0x38216000, 0x38218000, 0x3821a000, 0x3821c000, 0x3821e000, + 0x38220000, 0x38222000, 0x38224000, 0x38226000, 0x38228000, 0x3822a000, 0x3822c000, 0x3822e000, + 0x38230000, 0x38232000, 0x38234000, 0x38236000, 0x38238000, 0x3823a000, 0x3823c000, 0x3823e000, + 0x38240000, 0x38242000, 0x38244000, 0x38246000, 0x38248000, 0x3824a000, 0x3824c000, 0x3824e000, + 0x38250000, 0x38252000, 0x38254000, 0x38256000, 0x38258000, 0x3825a000, 0x3825c000, 0x3825e000, + 0x38260000, 0x38262000, 0x38264000, 0x38266000, 0x38268000, 0x3826a000, 0x3826c000, 0x3826e000, + 0x38270000, 0x38272000, 0x38274000, 0x38276000, 0x38278000, 0x3827a000, 0x3827c000, 0x3827e000, + 0x38280000, 0x38282000, 0x38284000, 0x38286000, 0x38288000, 0x3828a000, 0x3828c000, 0x3828e000, + 0x38290000, 0x38292000, 0x38294000, 0x38296000, 0x38298000, 0x3829a000, 0x3829c000, 0x3829e000, + 0x382a0000, 0x382a2000, 0x382a4000, 0x382a6000, 0x382a8000, 0x382aa000, 0x382ac000, 0x382ae000, + 0x382b0000, 0x382b2000, 0x382b4000, 0x382b6000, 0x382b8000, 0x382ba000, 0x382bc000, 0x382be000, + 0x382c0000, 0x382c2000, 0x382c4000, 0x382c6000, 0x382c8000, 0x382ca000, 0x382cc000, 0x382ce000, + 0x382d0000, 0x382d2000, 0x382d4000, 0x382d6000, 0x382d8000, 0x382da000, 0x382dc000, 0x382de000, + 0x382e0000, 0x382e2000, 0x382e4000, 0x382e6000, 0x382e8000, 0x382ea000, 0x382ec000, 0x382ee000, + 0x382f0000, 0x382f2000, 0x382f4000, 0x382f6000, 0x382f8000, 0x382fa000, 0x382fc000, 0x382fe000, + 0x38300000, 0x38302000, 0x38304000, 0x38306000, 0x38308000, 0x3830a000, 0x3830c000, 0x3830e000, + 0x38310000, 0x38312000, 0x38314000, 0x38316000, 0x38318000, 0x3831a000, 0x3831c000, 0x3831e000, + 0x38320000, 0x38322000, 0x38324000, 0x38326000, 0x38328000, 0x3832a000, 0x3832c000, 0x3832e000, + 0x38330000, 0x38332000, 0x38334000, 0x38336000, 0x38338000, 0x3833a000, 0x3833c000, 0x3833e000, + 0x38340000, 0x38342000, 
0x38344000, 0x38346000, 0x38348000, 0x3834a000, 0x3834c000, 0x3834e000, + 0x38350000, 0x38352000, 0x38354000, 0x38356000, 0x38358000, 0x3835a000, 0x3835c000, 0x3835e000, + 0x38360000, 0x38362000, 0x38364000, 0x38366000, 0x38368000, 0x3836a000, 0x3836c000, 0x3836e000, + 0x38370000, 0x38372000, 0x38374000, 0x38376000, 0x38378000, 0x3837a000, 0x3837c000, 0x3837e000, + 0x38380000, 0x38382000, 0x38384000, 0x38386000, 0x38388000, 0x3838a000, 0x3838c000, 0x3838e000, + 0x38390000, 0x38392000, 0x38394000, 0x38396000, 0x38398000, 0x3839a000, 0x3839c000, 0x3839e000, + 0x383a0000, 0x383a2000, 0x383a4000, 0x383a6000, 0x383a8000, 0x383aa000, 0x383ac000, 0x383ae000, + 0x383b0000, 0x383b2000, 0x383b4000, 0x383b6000, 0x383b8000, 0x383ba000, 0x383bc000, 0x383be000, + 0x383c0000, 0x383c2000, 0x383c4000, 0x383c6000, 0x383c8000, 0x383ca000, 0x383cc000, 0x383ce000, + 0x383d0000, 0x383d2000, 0x383d4000, 0x383d6000, 0x383d8000, 0x383da000, 0x383dc000, 0x383de000, + 0x383e0000, 0x383e2000, 0x383e4000, 0x383e6000, 0x383e8000, 0x383ea000, 0x383ec000, 0x383ee000, + 0x383f0000, 0x383f2000, 0x383f4000, 0x383f6000, 0x383f8000, 0x383fa000, 0x383fc000, 0x383fe000, + 0x38400000, 0x38402000, 0x38404000, 0x38406000, 0x38408000, 0x3840a000, 0x3840c000, 0x3840e000, + 0x38410000, 0x38412000, 0x38414000, 0x38416000, 0x38418000, 0x3841a000, 0x3841c000, 0x3841e000, + 0x38420000, 0x38422000, 0x38424000, 0x38426000, 0x38428000, 0x3842a000, 0x3842c000, 0x3842e000, + 0x38430000, 0x38432000, 0x38434000, 0x38436000, 0x38438000, 0x3843a000, 0x3843c000, 0x3843e000, + 0x38440000, 0x38442000, 0x38444000, 0x38446000, 0x38448000, 0x3844a000, 0x3844c000, 0x3844e000, + 0x38450000, 0x38452000, 0x38454000, 0x38456000, 0x38458000, 0x3845a000, 0x3845c000, 0x3845e000, + 0x38460000, 0x38462000, 0x38464000, 0x38466000, 0x38468000, 0x3846a000, 0x3846c000, 0x3846e000, + 0x38470000, 0x38472000, 0x38474000, 0x38476000, 0x38478000, 0x3847a000, 0x3847c000, 0x3847e000, + 0x38480000, 0x38482000, 0x38484000, 0x38486000, 0x38488000, 0x3848a000, 0x3848c000, 0x3848e000, + 0x38490000, 0x38492000, 0x38494000, 0x38496000, 0x38498000, 0x3849a000, 0x3849c000, 0x3849e000, + 0x384a0000, 0x384a2000, 0x384a4000, 0x384a6000, 0x384a8000, 0x384aa000, 0x384ac000, 0x384ae000, + 0x384b0000, 0x384b2000, 0x384b4000, 0x384b6000, 0x384b8000, 0x384ba000, 0x384bc000, 0x384be000, + 0x384c0000, 0x384c2000, 0x384c4000, 0x384c6000, 0x384c8000, 0x384ca000, 0x384cc000, 0x384ce000, + 0x384d0000, 0x384d2000, 0x384d4000, 0x384d6000, 0x384d8000, 0x384da000, 0x384dc000, 0x384de000, + 0x384e0000, 0x384e2000, 0x384e4000, 0x384e6000, 0x384e8000, 0x384ea000, 0x384ec000, 0x384ee000, + 0x384f0000, 0x384f2000, 0x384f4000, 0x384f6000, 0x384f8000, 0x384fa000, 0x384fc000, 0x384fe000, + 0x38500000, 0x38502000, 0x38504000, 0x38506000, 0x38508000, 0x3850a000, 0x3850c000, 0x3850e000, + 0x38510000, 0x38512000, 0x38514000, 0x38516000, 0x38518000, 0x3851a000, 0x3851c000, 0x3851e000, + 0x38520000, 0x38522000, 0x38524000, 0x38526000, 0x38528000, 0x3852a000, 0x3852c000, 0x3852e000, + 0x38530000, 0x38532000, 0x38534000, 0x38536000, 0x38538000, 0x3853a000, 0x3853c000, 0x3853e000, + 0x38540000, 0x38542000, 0x38544000, 0x38546000, 0x38548000, 0x3854a000, 0x3854c000, 0x3854e000, + 0x38550000, 0x38552000, 0x38554000, 0x38556000, 0x38558000, 0x3855a000, 0x3855c000, 0x3855e000, + 0x38560000, 0x38562000, 0x38564000, 0x38566000, 0x38568000, 0x3856a000, 0x3856c000, 0x3856e000, + 0x38570000, 0x38572000, 0x38574000, 0x38576000, 0x38578000, 0x3857a000, 0x3857c000, 0x3857e000, + 0x38580000, 0x38582000, 0x38584000, 0x38586000, 
0x38588000, 0x3858a000, 0x3858c000, 0x3858e000, + 0x38590000, 0x38592000, 0x38594000, 0x38596000, 0x38598000, 0x3859a000, 0x3859c000, 0x3859e000, + 0x385a0000, 0x385a2000, 0x385a4000, 0x385a6000, 0x385a8000, 0x385aa000, 0x385ac000, 0x385ae000, + 0x385b0000, 0x385b2000, 0x385b4000, 0x385b6000, 0x385b8000, 0x385ba000, 0x385bc000, 0x385be000, + 0x385c0000, 0x385c2000, 0x385c4000, 0x385c6000, 0x385c8000, 0x385ca000, 0x385cc000, 0x385ce000, + 0x385d0000, 0x385d2000, 0x385d4000, 0x385d6000, 0x385d8000, 0x385da000, 0x385dc000, 0x385de000, + 0x385e0000, 0x385e2000, 0x385e4000, 0x385e6000, 0x385e8000, 0x385ea000, 0x385ec000, 0x385ee000, + 0x385f0000, 0x385f2000, 0x385f4000, 0x385f6000, 0x385f8000, 0x385fa000, 0x385fc000, 0x385fe000, + 0x38600000, 0x38602000, 0x38604000, 0x38606000, 0x38608000, 0x3860a000, 0x3860c000, 0x3860e000, + 0x38610000, 0x38612000, 0x38614000, 0x38616000, 0x38618000, 0x3861a000, 0x3861c000, 0x3861e000, + 0x38620000, 0x38622000, 0x38624000, 0x38626000, 0x38628000, 0x3862a000, 0x3862c000, 0x3862e000, + 0x38630000, 0x38632000, 0x38634000, 0x38636000, 0x38638000, 0x3863a000, 0x3863c000, 0x3863e000, + 0x38640000, 0x38642000, 0x38644000, 0x38646000, 0x38648000, 0x3864a000, 0x3864c000, 0x3864e000, + 0x38650000, 0x38652000, 0x38654000, 0x38656000, 0x38658000, 0x3865a000, 0x3865c000, 0x3865e000, + 0x38660000, 0x38662000, 0x38664000, 0x38666000, 0x38668000, 0x3866a000, 0x3866c000, 0x3866e000, + 0x38670000, 0x38672000, 0x38674000, 0x38676000, 0x38678000, 0x3867a000, 0x3867c000, 0x3867e000, + 0x38680000, 0x38682000, 0x38684000, 0x38686000, 0x38688000, 0x3868a000, 0x3868c000, 0x3868e000, + 0x38690000, 0x38692000, 0x38694000, 0x38696000, 0x38698000, 0x3869a000, 0x3869c000, 0x3869e000, + 0x386a0000, 0x386a2000, 0x386a4000, 0x386a6000, 0x386a8000, 0x386aa000, 0x386ac000, 0x386ae000, + 0x386b0000, 0x386b2000, 0x386b4000, 0x386b6000, 0x386b8000, 0x386ba000, 0x386bc000, 0x386be000, + 0x386c0000, 0x386c2000, 0x386c4000, 0x386c6000, 0x386c8000, 0x386ca000, 0x386cc000, 0x386ce000, + 0x386d0000, 0x386d2000, 0x386d4000, 0x386d6000, 0x386d8000, 0x386da000, 0x386dc000, 0x386de000, + 0x386e0000, 0x386e2000, 0x386e4000, 0x386e6000, 0x386e8000, 0x386ea000, 0x386ec000, 0x386ee000, + 0x386f0000, 0x386f2000, 0x386f4000, 0x386f6000, 0x386f8000, 0x386fa000, 0x386fc000, 0x386fe000, + 0x38700000, 0x38702000, 0x38704000, 0x38706000, 0x38708000, 0x3870a000, 0x3870c000, 0x3870e000, + 0x38710000, 0x38712000, 0x38714000, 0x38716000, 0x38718000, 0x3871a000, 0x3871c000, 0x3871e000, + 0x38720000, 0x38722000, 0x38724000, 0x38726000, 0x38728000, 0x3872a000, 0x3872c000, 0x3872e000, + 0x38730000, 0x38732000, 0x38734000, 0x38736000, 0x38738000, 0x3873a000, 0x3873c000, 0x3873e000, + 0x38740000, 0x38742000, 0x38744000, 0x38746000, 0x38748000, 0x3874a000, 0x3874c000, 0x3874e000, + 0x38750000, 0x38752000, 0x38754000, 0x38756000, 0x38758000, 0x3875a000, 0x3875c000, 0x3875e000, + 0x38760000, 0x38762000, 0x38764000, 0x38766000, 0x38768000, 0x3876a000, 0x3876c000, 0x3876e000, + 0x38770000, 0x38772000, 0x38774000, 0x38776000, 0x38778000, 0x3877a000, 0x3877c000, 0x3877e000, + 0x38780000, 0x38782000, 0x38784000, 0x38786000, 0x38788000, 0x3878a000, 0x3878c000, 0x3878e000, + 0x38790000, 0x38792000, 0x38794000, 0x38796000, 0x38798000, 0x3879a000, 0x3879c000, 0x3879e000, + 0x387a0000, 0x387a2000, 0x387a4000, 0x387a6000, 0x387a8000, 0x387aa000, 0x387ac000, 0x387ae000, + 0x387b0000, 0x387b2000, 0x387b4000, 0x387b6000, 0x387b8000, 0x387ba000, 0x387bc000, 0x387be000, + 0x387c0000, 0x387c2000, 0x387c4000, 0x387c6000, 0x387c8000, 0x387ca000, 
0x387cc000, 0x387ce000, + 0x387d0000, 0x387d2000, 0x387d4000, 0x387d6000, 0x387d8000, 0x387da000, 0x387dc000, 0x387de000, + 0x387e0000, 0x387e2000, 0x387e4000, 0x387e6000, 0x387e8000, 0x387ea000, 0x387ec000, 0x387ee000, + 0x387f0000, 0x387f2000, 0x387f4000, 0x387f6000, 0x387f8000, 0x387fa000, 0x387fc000, 0x387fe000, +}; + +const static unsigned g_exponent[64] = { + 0x00000000, 0x00800000, 0x01000000, 0x01800000, 0x02000000, 0x02800000, 0x03000000, 0x03800000, + 0x04000000, 0x04800000, 0x05000000, 0x05800000, 0x06000000, 0x06800000, 0x07000000, 0x07800000, + 0x08000000, 0x08800000, 0x09000000, 0x09800000, 0x0a000000, 0x0a800000, 0x0b000000, 0x0b800000, + 0x0c000000, 0x0c800000, 0x0d000000, 0x0d800000, 0x0e000000, 0x0e800000, 0x0f000000, 0x47800000, + 0x80000000, 0x80800000, 0x81000000, 0x81800000, 0x82000000, 0x82800000, 0x83000000, 0x83800000, + 0x84000000, 0x84800000, 0x85000000, 0x85800000, 0x86000000, 0x86800000, 0x87000000, 0x87800000, + 0x88000000, 0x88800000, 0x89000000, 0x89800000, 0x8a000000, 0x8a800000, 0x8b000000, 0x8b800000, + 0x8c000000, 0x8c800000, 0x8d000000, 0x8d800000, 0x8e000000, 0x8e800000, 0x8f000000, 0xc7800000, +}; + +const static unsigned g_offset[64] = { + 0x00000000, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, + 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, + 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, + 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, + 0x00000000, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, + 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, + 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, + 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, 0x00000400, +}; + +float float16ToFloat32(unsigned short h) +{ + unsigned i32 = g_mantissa[g_offset[h >> 10] + (h & 0x3ff)] + g_exponent[h >> 10]; + return bitCast<float>(i32); +} +} // namespace gl diff --git a/gfx/angle/checkout/src/common/MemoryBuffer.cpp b/gfx/angle/checkout/src/common/MemoryBuffer.cpp new file mode 100644 index 0000000000..5e57be225d --- /dev/null +++ b/gfx/angle/checkout/src/common/MemoryBuffer.cpp @@ -0,0 +1,147 @@ +// +// Copyright (c) 2014 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +#include "common/MemoryBuffer.h" + +#include <algorithm> +#include <cstdlib> + +#include "common/debug.h" + +namespace angle +{ + +// MemoryBuffer implementation. +MemoryBuffer::~MemoryBuffer() +{ + free(mData); + mData = nullptr; +} + +bool MemoryBuffer::resize(size_t size) +{ + if (size == 0) + { + free(mData); + mData = nullptr; + mSize = 0; + return true; + } + + if (size == mSize) + { + return true; + } + + // Only reallocate if the size has changed. 
+ uint8_t *newMemory = static_cast<uint8_t *>(malloc(sizeof(uint8_t) * size)); + if (newMemory == nullptr) + { + return false; + } + + if (mData) + { + // Copy the intersection of the old data and the new data + std::copy(mData, mData + std::min(mSize, size), newMemory); + free(mData); + } + + mData = newMemory; + mSize = size; + + return true; +} + +void MemoryBuffer::fill(uint8_t datum) +{ + if (!empty()) + { + std::fill(mData, mData + mSize, datum); + } +} + +MemoryBuffer::MemoryBuffer(MemoryBuffer &&other) : MemoryBuffer() +{ + *this = std::move(other); +} + +MemoryBuffer &MemoryBuffer::operator=(MemoryBuffer &&other) +{ + std::swap(mSize, other.mSize); + std::swap(mData, other.mData); + return *this; +} + +// ScratchBuffer implementation. + +ScratchBuffer::ScratchBuffer(uint32_t lifetime) : mLifetime(lifetime), mResetCounter(lifetime) {} + +ScratchBuffer::~ScratchBuffer() {} + +bool ScratchBuffer::get(size_t requestedSize, MemoryBuffer **memoryBufferOut) +{ + return getImpl(requestedSize, memoryBufferOut, Optional<uint8_t>::Invalid()); +} + +bool ScratchBuffer::getInitialized(size_t requestedSize, + MemoryBuffer **memoryBufferOut, + uint8_t initValue) +{ + return getImpl(requestedSize, memoryBufferOut, Optional<uint8_t>(initValue)); +} + +bool ScratchBuffer::getImpl(size_t requestedSize, + MemoryBuffer **memoryBufferOut, + Optional<uint8_t> initValue) +{ + if (mScratchMemory.size() == requestedSize) + { + mResetCounter = mLifetime; + *memoryBufferOut = &mScratchMemory; + return true; + } + + if (mScratchMemory.size() > requestedSize) + { + tick(); + } + + if (mResetCounter == 0 || mScratchMemory.size() < requestedSize) + { + mScratchMemory.resize(0); + if (!mScratchMemory.resize(requestedSize)) + { + return false; + } + mResetCounter = mLifetime; + if (initValue.valid()) + { + mScratchMemory.fill(initValue.value()); + } + } + + ASSERT(mScratchMemory.size() >= requestedSize); + + *memoryBufferOut = &mScratchMemory; + return true; +} + +void ScratchBuffer::tick() +{ + if (mResetCounter > 0) + { + --mResetCounter; + } +} + +void ScratchBuffer::clear() +{ + mResetCounter = mLifetime; + mScratchMemory.resize(0); +} + +} // namespace angle diff --git a/gfx/angle/checkout/src/common/MemoryBuffer.h b/gfx/angle/checkout/src/common/MemoryBuffer.h new file mode 100644 index 0000000000..ae6b64c8c7 --- /dev/null +++ b/gfx/angle/checkout/src/common/MemoryBuffer.h @@ -0,0 +1,89 @@ +// +// Copyright (c) 2014 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
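The lookup tables in Float16ToFloat32.cpp above implement half-precision to single-precision conversion without branching on special cases. As a hedged illustration (assuming float16ToFloat32 is declared in common/mathutil.h, which that .cpp includes), the half-float bit patterns 0x3C00 and 0xC000 decode to 1.0f and -2.0f:

#include "common/mathutil.h"

void halfFloatExample()
{
    // 0x3C00 is the IEEE 754 half-float encoding of 1.0; the offset, mantissa and
    // exponent table lookups reassemble the 32-bit pattern 0x3F800000.
    float one = gl::float16ToFloat32(0x3C00);  // 1.0f

    // 0xC000 encodes -2.0; the sign bit is carried through the exponent table.
    float minusTwo = gl::float16ToFloat32(0xC000);  // -2.0f
    (void)one;
    (void)minusTwo;
}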
+// + +#ifndef COMMON_MEMORYBUFFER_H_ +#define COMMON_MEMORYBUFFER_H_ + +#include "common/Optional.h" +#include "common/angleutils.h" +#include "common/debug.h" + +#include <stdint.h> +#include <cstddef> + +namespace angle +{ + +class MemoryBuffer final : NonCopyable +{ + public: + MemoryBuffer() = default; + MemoryBuffer(size_t size) { resize(size); } + ~MemoryBuffer(); + + MemoryBuffer(MemoryBuffer &&other); + MemoryBuffer &operator=(MemoryBuffer &&other); + + bool resize(size_t size); + size_t size() const { return mSize; } + bool empty() const { return mSize == 0; } + + const uint8_t *data() const { return mData; } + uint8_t *data() + { + ASSERT(mData); + return mData; + } + + uint8_t &operator[](size_t pos) + { + ASSERT(pos < mSize); + return mData[pos]; + } + const uint8_t &operator[](size_t pos) const + { + ASSERT(pos < mSize); + return mData[pos]; + } + + void fill(uint8_t datum); + + private: + size_t mSize = 0; + uint8_t *mData = nullptr; +}; + +class ScratchBuffer final : NonCopyable +{ + public: + // If we request a scratch buffer requesting a smaller size this many times, release and + // recreate the scratch buffer. This ensures we don't have a degenerate case where we are stuck + // hogging memory. + ScratchBuffer(uint32_t lifetime); + ~ScratchBuffer(); + + // Returns true with a memory buffer of the requested size, or false on failure. + bool get(size_t requestedSize, MemoryBuffer **memoryBufferOut); + + // Same as get, but ensures new values are initialized to a fixed constant. + bool getInitialized(size_t requestedSize, MemoryBuffer **memoryBufferOut, uint8_t initValue); + + // Ticks the release counter for the scratch buffer. Also done implicitly in get(). + void tick(); + + void clear(); + + private: + bool getImpl(size_t requestedSize, MemoryBuffer **memoryBufferOut, Optional<uint8_t> initValue); + + const uint32_t mLifetime; + uint32_t mResetCounter; + MemoryBuffer mScratchMemory; +}; + +} // namespace angle + +#endif // COMMON_MEMORYBUFFER_H_ diff --git a/gfx/angle/checkout/src/common/Optional.h b/gfx/angle/checkout/src/common/Optional.h new file mode 100644 index 0000000000..822de4de51 --- /dev/null +++ b/gfx/angle/checkout/src/common/Optional.h @@ -0,0 +1,68 @@ +// +// Copyright (c) 2015 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Optional.h: +// Represents a type that may be invalid, similar to std::optional. 
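A hypothetical sketch of the ScratchBuffer/MemoryBuffer pattern implemented in MemoryBuffer.cpp above: callers request a buffer of at least the needed size, and the scratch memory is reused across calls until the lifetime counter expires. The function scratchExample and its parameters are illustrative only.

#include "common/MemoryBuffer.h"

bool scratchExample(angle::ScratchBuffer &scratch, size_t byteCount)
{
    // Request zero-initialized scratch space; on failure the allocation could not be made.
    angle::MemoryBuffer *buffer = nullptr;
    if (!scratch.getInitialized(byteCount, &buffer, 0))
    {
        return false;
    }

    // data() asserts on an empty buffer, so only dereference when bytes were requested.
    uint8_t *bytes = (byteCount > 0) ? buffer->data() : nullptr;
    // ... write up to byteCount bytes through `bytes` ...
    (void)bytes;
    return true;
}

A caller would typically own a long-lived angle::ScratchBuffer scratch(100);, where 100 is the lifetime counter described in the header comment (the value here is illustrative).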
+// + +#ifndef COMMON_OPTIONAL_H_ +#define COMMON_OPTIONAL_H_ + +#include <utility> + +template <class T> +struct Optional +{ + Optional() : mValid(false), mValue(T()) {} + + Optional(const T &valueIn) : mValid(true), mValue(valueIn) {} + + Optional(const Optional &other) : mValid(other.mValid), mValue(other.mValue) {} + + Optional &operator=(const Optional &other) + { + this->mValid = other.mValid; + this->mValue = other.mValue; + return *this; + } + + Optional &operator=(const T &value) + { + mValue = value; + mValid = true; + return *this; + } + + Optional &operator=(T &&value) + { + mValue = std::move(value); + mValid = true; + return *this; + } + + void reset() { mValid = false; } + + static Optional Invalid() { return Optional(); } + + bool valid() const { return mValid; } + const T &value() const { return mValue; } + + bool operator==(const Optional &other) const + { + return ((mValid == other.mValid) && (!mValid || (mValue == other.mValue))); + } + + bool operator!=(const Optional &other) const { return !(*this == other); } + + bool operator==(const T &value) const { return mValid && (mValue == value); } + + bool operator!=(const T &value) const { return !(*this == value); } + + private: + bool mValid; + T mValue; +}; + +#endif // COMMON_OPTIONAL_H_ diff --git a/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.cpp b/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.cpp new file mode 100644 index 0000000000..6637335b76 --- /dev/null +++ b/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.cpp @@ -0,0 +1,355 @@ +// GENERATED FILE - DO NOT EDIT. +// Generated by gen_packed_gl_enums.py using data from packed_egl_enums.json. +// +// Copyright 2019 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// PackedEGLEnums_autogen.cpp: +// Implements ANGLE-specific enums classes for EGLenums and functions operating +// on them. 
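// Illustrative sketch (not part of the upstream diff): basic Optional<T> usage,
// mirroring how ScratchBuffer::getInitialized passes an optional fill value.
// The function name OptionalExample is hypothetical.
inline uint8_t OptionalExample()
{
    Optional<uint8_t> fillValue = Optional<uint8_t>::Invalid();  // not valid yet
    if (!fillValue.valid())
    {
        fillValue = 0x7F;  // assigning a value marks the Optional as valid
    }
    return fillValue.value();  // returns 0x7F
}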
+ +#include "common/PackedEGLEnums_autogen.h" +#include "common/debug.h" + +namespace egl +{ + +template <> +CompositorTiming FromEGLenum<CompositorTiming>(EGLenum from) +{ + switch (from) + { + case EGL_COMPOSITE_DEADLINE_ANDROID: + return CompositorTiming::CompositeDeadline; + case EGL_COMPOSITE_INTERVAL_ANDROID: + return CompositorTiming::CompositInterval; + case EGL_COMPOSITE_TO_PRESENT_LATENCY_ANDROID: + return CompositorTiming::CompositToPresentLatency; + default: + return CompositorTiming::InvalidEnum; + } +} + +EGLenum ToEGLenum(CompositorTiming from) +{ + switch (from) + { + case CompositorTiming::CompositeDeadline: + return EGL_COMPOSITE_DEADLINE_ANDROID; + case CompositorTiming::CompositInterval: + return EGL_COMPOSITE_INTERVAL_ANDROID; + case CompositorTiming::CompositToPresentLatency: + return EGL_COMPOSITE_TO_PRESENT_LATENCY_ANDROID; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, CompositorTiming value) +{ + switch (value) + { + case CompositorTiming::CompositeDeadline: + os << "EGL_COMPOSITE_DEADLINE_ANDROID"; + break; + case CompositorTiming::CompositInterval: + os << "EGL_COMPOSITE_INTERVAL_ANDROID"; + break; + case CompositorTiming::CompositToPresentLatency: + os << "EGL_COMPOSITE_TO_PRESENT_LATENCY_ANDROID"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +MessageType FromEGLenum<MessageType>(EGLenum from) +{ + switch (from) + { + case EGL_DEBUG_MSG_CRITICAL_KHR: + return MessageType::Critical; + case EGL_DEBUG_MSG_ERROR_KHR: + return MessageType::Error; + case EGL_DEBUG_MSG_WARN_KHR: + return MessageType::Warn; + case EGL_DEBUG_MSG_INFO_KHR: + return MessageType::Info; + default: + return MessageType::InvalidEnum; + } +} + +EGLenum ToEGLenum(MessageType from) +{ + switch (from) + { + case MessageType::Critical: + return EGL_DEBUG_MSG_CRITICAL_KHR; + case MessageType::Error: + return EGL_DEBUG_MSG_ERROR_KHR; + case MessageType::Warn: + return EGL_DEBUG_MSG_WARN_KHR; + case MessageType::Info: + return EGL_DEBUG_MSG_INFO_KHR; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, MessageType value) +{ + switch (value) + { + case MessageType::Critical: + os << "EGL_DEBUG_MSG_CRITICAL_KHR"; + break; + case MessageType::Error: + os << "EGL_DEBUG_MSG_ERROR_KHR"; + break; + case MessageType::Warn: + os << "EGL_DEBUG_MSG_WARN_KHR"; + break; + case MessageType::Info: + os << "EGL_DEBUG_MSG_INFO_KHR"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +ObjectType FromEGLenum<ObjectType>(EGLenum from) +{ + switch (from) + { + case EGL_OBJECT_THREAD_KHR: + return ObjectType::Thread; + case EGL_OBJECT_DISPLAY_KHR: + return ObjectType::Display; + case EGL_OBJECT_CONTEXT_KHR: + return ObjectType::Context; + case EGL_OBJECT_SURFACE_KHR: + return ObjectType::Surface; + case EGL_OBJECT_IMAGE_KHR: + return ObjectType::Image; + case EGL_OBJECT_SYNC_KHR: + return ObjectType::Sync; + case EGL_OBJECT_STREAM_KHR: + return ObjectType::Stream; + default: + return ObjectType::InvalidEnum; + } +} + +EGLenum ToEGLenum(ObjectType from) +{ + switch (from) + { + case ObjectType::Thread: + return EGL_OBJECT_THREAD_KHR; + case ObjectType::Display: + return EGL_OBJECT_DISPLAY_KHR; + case ObjectType::Context: + return EGL_OBJECT_CONTEXT_KHR; + case ObjectType::Surface: + return EGL_OBJECT_SURFACE_KHR; + case ObjectType::Image: + return EGL_OBJECT_IMAGE_KHR; + case ObjectType::Sync: + return EGL_OBJECT_SYNC_KHR; + case ObjectType::Stream: + 
return EGL_OBJECT_STREAM_KHR; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, ObjectType value) +{ + switch (value) + { + case ObjectType::Thread: + os << "EGL_OBJECT_THREAD_KHR"; + break; + case ObjectType::Display: + os << "EGL_OBJECT_DISPLAY_KHR"; + break; + case ObjectType::Context: + os << "EGL_OBJECT_CONTEXT_KHR"; + break; + case ObjectType::Surface: + os << "EGL_OBJECT_SURFACE_KHR"; + break; + case ObjectType::Image: + os << "EGL_OBJECT_IMAGE_KHR"; + break; + case ObjectType::Sync: + os << "EGL_OBJECT_SYNC_KHR"; + break; + case ObjectType::Stream: + os << "EGL_OBJECT_STREAM_KHR"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +TextureFormat FromEGLenum<TextureFormat>(EGLenum from) +{ + switch (from) + { + case EGL_NO_TEXTURE: + return TextureFormat::NoTexture; + case EGL_TEXTURE_RGB: + return TextureFormat::RGB; + case EGL_TEXTURE_RGBA: + return TextureFormat::RGBA; + default: + return TextureFormat::InvalidEnum; + } +} + +EGLenum ToEGLenum(TextureFormat from) +{ + switch (from) + { + case TextureFormat::NoTexture: + return EGL_NO_TEXTURE; + case TextureFormat::RGB: + return EGL_TEXTURE_RGB; + case TextureFormat::RGBA: + return EGL_TEXTURE_RGBA; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, TextureFormat value) +{ + switch (value) + { + case TextureFormat::NoTexture: + os << "EGL_NO_TEXTURE"; + break; + case TextureFormat::RGB: + os << "EGL_TEXTURE_RGB"; + break; + case TextureFormat::RGBA: + os << "EGL_TEXTURE_RGBA"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +Timestamp FromEGLenum<Timestamp>(EGLenum from) +{ + switch (from) + { + case EGL_REQUESTED_PRESENT_TIME_ANDROID: + return Timestamp::RequestedPresentTime; + case EGL_RENDERING_COMPLETE_TIME_ANDROID: + return Timestamp::RenderingCompleteTime; + case EGL_COMPOSITION_LATCH_TIME_ANDROID: + return Timestamp::CompositionLatchTime; + case EGL_FIRST_COMPOSITION_START_TIME_ANDROID: + return Timestamp::FirstCompositionStartTime; + case EGL_LAST_COMPOSITION_START_TIME_ANDROID: + return Timestamp::LastCompositionStartTime; + case EGL_FIRST_COMPOSITION_GPU_FINISHED_TIME_ANDROID: + return Timestamp::FirstCompositionGPUFinishedTime; + case EGL_DISPLAY_PRESENT_TIME_ANDROID: + return Timestamp::DisplayPresentTime; + case EGL_DEQUEUE_READY_TIME_ANDROID: + return Timestamp::DequeueReadyTime; + case EGL_READS_DONE_TIME_ANDROID: + return Timestamp::ReadsDoneTime; + default: + return Timestamp::InvalidEnum; + } +} + +EGLenum ToEGLenum(Timestamp from) +{ + switch (from) + { + case Timestamp::RequestedPresentTime: + return EGL_REQUESTED_PRESENT_TIME_ANDROID; + case Timestamp::RenderingCompleteTime: + return EGL_RENDERING_COMPLETE_TIME_ANDROID; + case Timestamp::CompositionLatchTime: + return EGL_COMPOSITION_LATCH_TIME_ANDROID; + case Timestamp::FirstCompositionStartTime: + return EGL_FIRST_COMPOSITION_START_TIME_ANDROID; + case Timestamp::LastCompositionStartTime: + return EGL_LAST_COMPOSITION_START_TIME_ANDROID; + case Timestamp::FirstCompositionGPUFinishedTime: + return EGL_FIRST_COMPOSITION_GPU_FINISHED_TIME_ANDROID; + case Timestamp::DisplayPresentTime: + return EGL_DISPLAY_PRESENT_TIME_ANDROID; + case Timestamp::DequeueReadyTime: + return EGL_DEQUEUE_READY_TIME_ANDROID; + case Timestamp::ReadsDoneTime: + return EGL_READS_DONE_TIME_ANDROID; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, Timestamp value) +{ + switch 
(value) + { + case Timestamp::RequestedPresentTime: + os << "EGL_REQUESTED_PRESENT_TIME_ANDROID"; + break; + case Timestamp::RenderingCompleteTime: + os << "EGL_RENDERING_COMPLETE_TIME_ANDROID"; + break; + case Timestamp::CompositionLatchTime: + os << "EGL_COMPOSITION_LATCH_TIME_ANDROID"; + break; + case Timestamp::FirstCompositionStartTime: + os << "EGL_FIRST_COMPOSITION_START_TIME_ANDROID"; + break; + case Timestamp::LastCompositionStartTime: + os << "EGL_LAST_COMPOSITION_START_TIME_ANDROID"; + break; + case Timestamp::FirstCompositionGPUFinishedTime: + os << "EGL_FIRST_COMPOSITION_GPU_FINISHED_TIME_ANDROID"; + break; + case Timestamp::DisplayPresentTime: + os << "EGL_DISPLAY_PRESENT_TIME_ANDROID"; + break; + case Timestamp::DequeueReadyTime: + os << "EGL_DEQUEUE_READY_TIME_ANDROID"; + break; + case Timestamp::ReadsDoneTime: + os << "EGL_READS_DONE_TIME_ANDROID"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +} // namespace egl diff --git a/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.h b/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.h new file mode 100644 index 0000000000..fc236bfdd0 --- /dev/null +++ b/gfx/angle/checkout/src/common/PackedEGLEnums_autogen.h @@ -0,0 +1,116 @@ +// GENERATED FILE - DO NOT EDIT. +// Generated by gen_packed_gl_enums.py using data from packed_egl_enums.json. +// +// Copyright 2019 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// PackedEGLEnums_autogen.h: +// Declares ANGLE-specific enums classes for EGLenums and functions operating +// on them. + +#ifndef COMMON_PACKEDEGLENUMS_AUTOGEN_H_ +#define COMMON_PACKEDEGLENUMS_AUTOGEN_H_ + +#include <EGL/egl.h> +#include <EGL/eglext.h> +#include <angle_gl.h> + +#include <cstdint> +#include <ostream> + +namespace egl +{ + +template <typename Enum> +Enum FromEGLenum(EGLenum from); + +enum class CompositorTiming : uint8_t +{ + CompositeDeadline = 0, + CompositInterval = 1, + CompositToPresentLatency = 2, + + InvalidEnum = 3, + EnumCount = 3, +}; + +template <> +CompositorTiming FromEGLenum<CompositorTiming>(EGLenum from); +EGLenum ToEGLenum(CompositorTiming from); +std::ostream &operator<<(std::ostream &os, CompositorTiming value); + +enum class MessageType : uint8_t +{ + Critical = 0, + Error = 1, + Warn = 2, + Info = 3, + + InvalidEnum = 4, + EnumCount = 4, +}; + +template <> +MessageType FromEGLenum<MessageType>(EGLenum from); +EGLenum ToEGLenum(MessageType from); +std::ostream &operator<<(std::ostream &os, MessageType value); + +enum class ObjectType : uint8_t +{ + Thread = 0, + Display = 1, + Context = 2, + Surface = 3, + Image = 4, + Sync = 5, + Stream = 6, + + InvalidEnum = 7, + EnumCount = 7, +}; + +template <> +ObjectType FromEGLenum<ObjectType>(EGLenum from); +EGLenum ToEGLenum(ObjectType from); +std::ostream &operator<<(std::ostream &os, ObjectType value); + +enum class TextureFormat : uint8_t +{ + NoTexture = 0, + RGB = 1, + RGBA = 2, + + InvalidEnum = 3, + EnumCount = 3, +}; + +template <> +TextureFormat FromEGLenum<TextureFormat>(EGLenum from); +EGLenum ToEGLenum(TextureFormat from); +std::ostream &operator<<(std::ostream &os, TextureFormat value); + +enum class Timestamp : uint8_t +{ + RequestedPresentTime = 0, + RenderingCompleteTime = 1, + CompositionLatchTime = 2, + FirstCompositionStartTime = 3, + LastCompositionStartTime = 4, + FirstCompositionGPUFinishedTime = 5, + DisplayPresentTime = 6, + DequeueReadyTime = 7, + ReadsDoneTime = 8, + + 
InvalidEnum = 9, + EnumCount = 9, +}; + +template <> +Timestamp FromEGLenum<Timestamp>(EGLenum from); +EGLenum ToEGLenum(Timestamp from); +std::ostream &operator<<(std::ostream &os, Timestamp value); + +} // namespace egl + +#endif // COMMON_PACKEDEGLENUMS_AUTOGEN_H_ diff --git a/gfx/angle/checkout/src/common/PackedEnums.cpp b/gfx/angle/checkout/src/common/PackedEnums.cpp new file mode 100644 index 0000000000..9f219b8200 --- /dev/null +++ b/gfx/angle/checkout/src/common/PackedEnums.cpp @@ -0,0 +1,367 @@ +// Copyright 2018 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// PackedGLEnums.cpp: +// Declares ANGLE-specific enums classes for GLEnum and functions operating +// on them. + +#include "common/PackedEnums.h" + +#include "common/utilities.h" + +namespace gl +{ + +TextureType TextureTargetToType(TextureTarget target) +{ + switch (target) + { + case TextureTarget::CubeMapNegativeX: + case TextureTarget::CubeMapNegativeY: + case TextureTarget::CubeMapNegativeZ: + case TextureTarget::CubeMapPositiveX: + case TextureTarget::CubeMapPositiveY: + case TextureTarget::CubeMapPositiveZ: + return TextureType::CubeMap; + case TextureTarget::External: + return TextureType::External; + case TextureTarget::Rectangle: + return TextureType::Rectangle; + case TextureTarget::_2D: + return TextureType::_2D; + case TextureTarget::_2DArray: + return TextureType::_2DArray; + case TextureTarget::_2DMultisample: + return TextureType::_2DMultisample; + case TextureTarget::_2DMultisampleArray: + return TextureType::_2DMultisampleArray; + case TextureTarget::_3D: + return TextureType::_3D; + case TextureTarget::InvalidEnum: + return TextureType::InvalidEnum; + default: + UNREACHABLE(); + return TextureType::InvalidEnum; + } +} + +bool IsCubeMapFaceTarget(TextureTarget target) +{ + return TextureTargetToType(target) == TextureType::CubeMap; +} + +TextureTarget NonCubeTextureTypeToTarget(TextureType type) +{ + switch (type) + { + case TextureType::External: + return TextureTarget::External; + case TextureType::Rectangle: + return TextureTarget::Rectangle; + case TextureType::_2D: + return TextureTarget::_2D; + case TextureType::_2DArray: + return TextureTarget::_2DArray; + case TextureType::_2DMultisample: + return TextureTarget::_2DMultisample; + case TextureType::_2DMultisampleArray: + return TextureTarget::_2DMultisampleArray; + case TextureType::_3D: + return TextureTarget::_3D; + default: + UNREACHABLE(); + return TextureTarget::InvalidEnum; + } +} + +// Check that we can do arithmetic on TextureTarget to convert from / to cube map faces +static_assert(static_cast<uint8_t>(TextureTarget::CubeMapNegativeX) - + static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) == + 1u, + ""); +static_assert(static_cast<uint8_t>(TextureTarget::CubeMapPositiveY) - + static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) == + 2u, + ""); +static_assert(static_cast<uint8_t>(TextureTarget::CubeMapNegativeY) - + static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) == + 3u, + ""); +static_assert(static_cast<uint8_t>(TextureTarget::CubeMapPositiveZ) - + static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) == + 4u, + ""); +static_assert(static_cast<uint8_t>(TextureTarget::CubeMapNegativeZ) - + static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) == + 5u, + ""); + +TextureTarget CubeFaceIndexToTextureTarget(size_t face) +{ + ASSERT(face < 6u); + return 
static_cast<TextureTarget>(static_cast<uint8_t>(TextureTarget::CubeMapPositiveX) + face); +} + +size_t CubeMapTextureTargetToFaceIndex(TextureTarget target) +{ + ASSERT(IsCubeMapFaceTarget(target)); + return static_cast<uint8_t>(target) - static_cast<uint8_t>(TextureTarget::CubeMapPositiveX); +} + +TextureType SamplerTypeToTextureType(GLenum samplerType) +{ + switch (samplerType) + { + case GL_SAMPLER_2D: + case GL_INT_SAMPLER_2D: + case GL_UNSIGNED_INT_SAMPLER_2D: + case GL_SAMPLER_2D_SHADOW: + return TextureType::_2D; + + case GL_SAMPLER_EXTERNAL_OES: + return TextureType::External; + + case GL_SAMPLER_CUBE: + case GL_INT_SAMPLER_CUBE: + case GL_UNSIGNED_INT_SAMPLER_CUBE: + case GL_SAMPLER_CUBE_SHADOW: + return TextureType::CubeMap; + + case GL_SAMPLER_2D_ARRAY: + case GL_INT_SAMPLER_2D_ARRAY: + case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY: + case GL_SAMPLER_2D_ARRAY_SHADOW: + return TextureType::_2DArray; + + case GL_SAMPLER_3D: + case GL_INT_SAMPLER_3D: + case GL_UNSIGNED_INT_SAMPLER_3D: + return TextureType::_3D; + + case GL_SAMPLER_2D_MULTISAMPLE: + case GL_INT_SAMPLER_2D_MULTISAMPLE: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE: + return TextureType::_2DMultisample; + + case GL_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + return TextureType::_2DMultisampleArray; + + case GL_SAMPLER_2D_RECT_ANGLE: + return TextureType::Rectangle; + + default: + UNREACHABLE(); + return TextureType::InvalidEnum; + } +} + +bool IsMultisampled(gl::TextureType type) +{ + switch (type) + { + case gl::TextureType::_2DMultisample: + case gl::TextureType::_2DMultisampleArray: + return true; + default: + return false; + } +} + +std::ostream &operator<<(std::ostream &os, PrimitiveMode value) +{ + switch (value) + { + case PrimitiveMode::LineLoop: + os << "GL_LINE_LOOP"; + break; + case PrimitiveMode::Lines: + os << "GL_LINES"; + break; + case PrimitiveMode::LinesAdjacency: + os << "GL_LINES_ADJACENCY"; + break; + case PrimitiveMode::LineStrip: + os << "GL_LINE_STRIP"; + break; + case PrimitiveMode::LineStripAdjacency: + os << "GL_LINE_STRIP_ADJANCENCY"; + break; + case PrimitiveMode::Points: + os << "GL_POINTS"; + break; + case PrimitiveMode::TriangleFan: + os << "GL_TRIANGLE_FAN"; + break; + case PrimitiveMode::Triangles: + os << "GL_TRIANGLES"; + break; + case PrimitiveMode::TrianglesAdjacency: + os << "GL_TRIANGLES_ADJANCENCY"; + break; + case PrimitiveMode::TriangleStrip: + os << "GL_TRIANGLE_STRIP"; + break; + case PrimitiveMode::TriangleStripAdjacency: + os << "GL_TRIANGLE_STRIP_ADJACENCY"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +std::ostream &operator<<(std::ostream &os, DrawElementsType value) +{ + switch (value) + { + case DrawElementsType::UnsignedByte: + os << "GL_UNSIGNED_BYTE"; + break; + case DrawElementsType::UnsignedShort: + os << "GL_UNSIGNED_SHORT"; + break; + case DrawElementsType::UnsignedInt: + os << "GL_UNSIGNED_INT"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + + return os; +} + +std::ostream &operator<<(std::ostream &os, VertexAttribType value) +{ + switch (value) + { + case VertexAttribType::Byte: + os << "GL_UNSIGNED_BYTE"; + break; + case VertexAttribType::Fixed: + os << "GL_FIXED"; + break; + case VertexAttribType::Float: + os << "GL_FLOAT"; + break; + case VertexAttribType::HalfFloat: + os << "GL_HALF_FLOAT"; + break; + case VertexAttribType::Int: + os << "GL_INT"; + break; + case VertexAttribType::Int2101010: + os << "GL_INT_10_10_10_2"; + break; + 
case VertexAttribType::Short: + os << "GL_SHORT"; + break; + case VertexAttribType::UnsignedByte: + os << "GL_UNSIGNED_BYTE"; + break; + case VertexAttribType::UnsignedInt: + os << "GL_UNSIGNED_INT"; + break; + case VertexAttribType::UnsignedInt2101010: + os << "GL_UNSIGNED_INT_10_10_10_2"; + break; + case VertexAttribType::UnsignedShort: + os << "GL_UNSIGNED_SHORT"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} +} // namespace gl + +namespace egl +{ +MessageType ErrorCodeToMessageType(EGLint errorCode) +{ + switch (errorCode) + { + case EGL_BAD_ALLOC: + case EGL_CONTEXT_LOST: + case EGL_NOT_INITIALIZED: + return MessageType::Critical; + + case EGL_BAD_ACCESS: + case EGL_BAD_ATTRIBUTE: + case EGL_BAD_CONFIG: + case EGL_BAD_CONTEXT: + case EGL_BAD_CURRENT_SURFACE: + case EGL_BAD_DISPLAY: + case EGL_BAD_MATCH: + case EGL_BAD_NATIVE_PIXMAP: + case EGL_BAD_NATIVE_WINDOW: + case EGL_BAD_PARAMETER: + case EGL_BAD_SURFACE: + case EGL_BAD_STREAM_KHR: + case EGL_BAD_STATE_KHR: + case EGL_BAD_DEVICE_EXT: + return MessageType::Error; + + case EGL_SUCCESS: + default: + UNREACHABLE(); + return MessageType::InvalidEnum; + } +} +} // namespace egl + +namespace egl_gl +{ + +gl::TextureTarget EGLCubeMapTargetToCubeMapTarget(EGLenum eglTarget) +{ + ASSERT(egl::IsCubeMapTextureTarget(eglTarget)); + return gl::CubeFaceIndexToTextureTarget(egl::CubeMapTextureTargetToLayerIndex(eglTarget)); +} + +gl::TextureTarget EGLImageTargetToTextureTarget(EGLenum eglTarget) +{ + switch (eglTarget) + { + case EGL_GL_TEXTURE_2D_KHR: + return gl::TextureTarget::_2D; + + case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR: + case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X_KHR: + case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y_KHR: + case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_KHR: + case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z_KHR: + case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_KHR: + return EGLCubeMapTargetToCubeMapTarget(eglTarget); + + case EGL_GL_TEXTURE_3D_KHR: + return gl::TextureTarget::_3D; + + default: + UNREACHABLE(); + return gl::TextureTarget::InvalidEnum; + } +} + +gl::TextureType EGLTextureTargetToTextureType(EGLenum eglTarget) +{ + switch (eglTarget) + { + case EGL_TEXTURE_2D: + return gl::TextureType::_2D; + + case EGL_TEXTURE_RECTANGLE_ANGLE: + return gl::TextureType::Rectangle; + + default: + UNREACHABLE(); + return gl::TextureType::InvalidEnum; + } +} + +} // namespace egl_gl diff --git a/gfx/angle/checkout/src/common/PackedEnums.h b/gfx/angle/checkout/src/common/PackedEnums.h new file mode 100644 index 0000000000..ad6a313a14 --- /dev/null +++ b/gfx/angle/checkout/src/common/PackedEnums.h @@ -0,0 +1,390 @@ +// Copyright 2017 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// PackedGLEnums_autogen.h: +// Declares ANGLE-specific enums classes for GLEnum and functions operating +// on them. + +#ifndef COMMON_PACKEDGLENUMS_H_ +#define COMMON_PACKEDGLENUMS_H_ + +#include "common/PackedEGLEnums_autogen.h" +#include "common/PackedGLEnums_autogen.h" + +#include <array> +#include <bitset> +#include <cstddef> + +#include <EGL/egl.h> + +#include "common/bitset_utils.h" + +namespace angle +{ + +// Return the number of elements of a packed enum, including the InvalidEnum element. 
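// Illustrative sketch (not part of the upstream diff): the static_asserts in
// PackedEnums.cpp above guarantee that cube-map faces and face indices round-trip
// through simple arithmetic, e.g.:
//
//   gl::TextureTarget posY = gl::CubeFaceIndexToTextureTarget(2);       // CubeMapPositiveY
//   size_t index           = gl::CubeMapTextureTargetToFaceIndex(posY); // index == 2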
+template <typename E> +constexpr size_t EnumSize() +{ + using UnderlyingType = typename std::underlying_type<E>::type; + return static_cast<UnderlyingType>(E::EnumCount); +} + +// Implementation of AllEnums which allows iterating over all the possible values for a packed enums +// like so: +// for (auto value : AllEnums<MyPackedEnum>()) { +// // Do something with the enum. +// } + +template <typename E> +class EnumIterator final +{ + private: + using UnderlyingType = typename std::underlying_type<E>::type; + + public: + EnumIterator(E value) : mValue(static_cast<UnderlyingType>(value)) {} + EnumIterator &operator++() + { + mValue++; + return *this; + } + bool operator==(const EnumIterator &other) const { return mValue == other.mValue; } + bool operator!=(const EnumIterator &other) const { return mValue != other.mValue; } + E operator*() const { return static_cast<E>(mValue); } + + private: + UnderlyingType mValue; +}; + +template <typename E> +struct AllEnums +{ + EnumIterator<E> begin() const { return {static_cast<E>(0)}; } + EnumIterator<E> end() const { return {E::InvalidEnum}; } +}; + +// PackedEnumMap<E, T> is like an std::array<T, E::EnumCount> but is indexed with enum values. It +// implements all of the std::array interface except with enum values instead of indices. +template <typename E, typename T, size_t MaxSize = EnumSize<E>()> +class PackedEnumMap +{ + using UnderlyingType = typename std::underlying_type<E>::type; + using Storage = std::array<T, MaxSize>; + + public: + using InitPair = std::pair<E, T>; + + constexpr PackedEnumMap() = default; + + constexpr PackedEnumMap(std::initializer_list<InitPair> init) : mPrivateData{} + { + // We use a for loop instead of range-for to work around a limitation in MSVC. + for (const InitPair *it = init.begin(); it != init.end(); ++it) + { + // This horrible const_cast pattern is necessary to work around a constexpr limitation. + // See https://stackoverflow.com/q/34199774/ . Note that it should be fixed with C++17. 
+ const_cast<T &>(const_cast<const Storage &>( + mPrivateData)[static_cast<UnderlyingType>(it->first)]) = it->second; + } + } + + // types: + using value_type = T; + using pointer = T *; + using const_pointer = const T *; + using reference = T &; + using const_reference = const T &; + + using size_type = size_t; + using difference_type = ptrdiff_t; + + using iterator = typename Storage::iterator; + using const_iterator = typename Storage::const_iterator; + using reverse_iterator = std::reverse_iterator<iterator>; + using const_reverse_iterator = std::reverse_iterator<const_iterator>; + + // No explicit construct/copy/destroy for aggregate type + void fill(const T &u) { mPrivateData.fill(u); } + void swap(PackedEnumMap<E, T> &a) noexcept { mPrivateData.swap(a.mPrivateData); } + + // iterators: + iterator begin() noexcept { return mPrivateData.begin(); } + const_iterator begin() const noexcept { return mPrivateData.begin(); } + iterator end() noexcept { return mPrivateData.end(); } + const_iterator end() const noexcept { return mPrivateData.end(); } + + reverse_iterator rbegin() noexcept { return mPrivateData.rbegin(); } + const_reverse_iterator rbegin() const noexcept { return mPrivateData.rbegin(); } + reverse_iterator rend() noexcept { return mPrivateData.rend(); } + const_reverse_iterator rend() const noexcept { return mPrivateData.rend(); } + + // capacity: + constexpr size_type size() const noexcept { return mPrivateData.size(); } + constexpr size_type max_size() const noexcept { return mPrivateData.max_size(); } + constexpr bool empty() const noexcept { return mPrivateData.empty(); } + + // element access: + reference operator[](E n) + { + ASSERT(static_cast<size_t>(n) < mPrivateData.size()); + return mPrivateData[static_cast<UnderlyingType>(n)]; + } + + constexpr const_reference operator[](E n) const + { + ASSERT(static_cast<size_t>(n) < mPrivateData.size()); + return mPrivateData[static_cast<UnderlyingType>(n)]; + } + + const_reference at(E n) const { return mPrivateData.at(static_cast<UnderlyingType>(n)); } + reference at(E n) { return mPrivateData.at(static_cast<UnderlyingType>(n)); } + + reference front() { return mPrivateData.front(); } + const_reference front() const { return mPrivateData.front(); } + reference back() { return mPrivateData.back(); } + const_reference back() const { return mPrivateData.back(); } + + T *data() noexcept { return mPrivateData.data(); } + const T *data() const noexcept { return mPrivateData.data(); } + + private: + Storage mPrivateData; +}; + +// PackedEnumBitSetE> is like an std::bitset<E::EnumCount> but is indexed with enum values. It +// implements the std::bitset interface except with enum values instead of indices. 
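// Illustrative sketch (not part of the upstream diff): iterating a packed enum with
// AllEnums and indexing a PackedEnumMap with enum values. ExampleFruit is a
// hypothetical enum that follows the InvalidEnum/EnumCount convention used above;
// it is not part of ANGLE.
enum class ExampleFruit : uint8_t
{
    Apple       = 0,
    Banana      = 1,
    InvalidEnum = 2,
    EnumCount   = 2,
};

inline int SumExampleFruit(const PackedEnumMap<ExampleFruit, int> &counts)
{
    int total = 0;
    for (ExampleFruit fruit : AllEnums<ExampleFruit>())  // visits Apple and Banana only
    {
        total += counts[fruit];
    }
    return total;
}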
+template <typename E, typename DataT = uint32_t> +using PackedEnumBitSet = BitSetT<EnumSize<E>(), DataT, E>; + +} // namespace angle + +namespace gl +{ + +TextureType TextureTargetToType(TextureTarget target); +TextureTarget NonCubeTextureTypeToTarget(TextureType type); + +TextureTarget CubeFaceIndexToTextureTarget(size_t face); +size_t CubeMapTextureTargetToFaceIndex(TextureTarget target); +bool IsCubeMapFaceTarget(TextureTarget target); + +constexpr TextureTarget kCubeMapTextureTargetMin = TextureTarget::CubeMapPositiveX; +constexpr TextureTarget kCubeMapTextureTargetMax = TextureTarget::CubeMapNegativeZ; +constexpr TextureTarget kAfterCubeMapTextureTargetMax = + static_cast<TextureTarget>(static_cast<uint8_t>(kCubeMapTextureTargetMax) + 1); +struct AllCubeFaceTextureTargets +{ + angle::EnumIterator<TextureTarget> begin() const { return kCubeMapTextureTargetMin; } + angle::EnumIterator<TextureTarget> end() const { return kAfterCubeMapTextureTargetMax; } +}; + +constexpr ShaderType kGLES2ShaderTypeMin = ShaderType::Vertex; +constexpr ShaderType kGLES2ShaderTypeMax = ShaderType::Fragment; +constexpr ShaderType kAfterGLES2ShaderTypeMax = + static_cast<ShaderType>(static_cast<uint8_t>(kGLES2ShaderTypeMax) + 1); +struct AllGLES2ShaderTypes +{ + angle::EnumIterator<ShaderType> begin() const { return kGLES2ShaderTypeMin; } + angle::EnumIterator<ShaderType> end() const { return kAfterGLES2ShaderTypeMax; } +}; + +constexpr ShaderType kShaderTypeMin = ShaderType::Vertex; +constexpr ShaderType kShaderTypeMax = ShaderType::Compute; +constexpr ShaderType kAfterShaderTypeMax = + static_cast<ShaderType>(static_cast<uint8_t>(kShaderTypeMax) + 1); +struct AllShaderTypes +{ + angle::EnumIterator<ShaderType> begin() const { return kShaderTypeMin; } + angle::EnumIterator<ShaderType> end() const { return kAfterShaderTypeMax; } +}; + +constexpr size_t kGraphicsShaderCount = static_cast<size_t>(ShaderType::EnumCount) - 1u; +// Arrange the shader types in the order of rendering pipeline +constexpr std::array<ShaderType, kGraphicsShaderCount> kAllGraphicsShaderTypes = { + ShaderType::Vertex, ShaderType::Geometry, ShaderType::Fragment}; + +using ShaderBitSet = angle::PackedEnumBitSet<ShaderType, uint8_t>; +static_assert(sizeof(ShaderBitSet) == sizeof(uint8_t), "Unexpected size"); + +template <typename T> +using ShaderMap = angle::PackedEnumMap<ShaderType, T>; + +TextureType SamplerTypeToTextureType(GLenum samplerType); + +bool IsMultisampled(gl::TextureType type); + +enum class PrimitiveMode : uint8_t +{ + Points = 0x0, + Lines = 0x1, + LineLoop = 0x2, + LineStrip = 0x3, + Triangles = 0x4, + TriangleStrip = 0x5, + TriangleFan = 0x6, + Unused1 = 0x7, + Unused2 = 0x8, + Unused3 = 0x9, + LinesAdjacency = 0xA, + LineStripAdjacency = 0xB, + TrianglesAdjacency = 0xC, + TriangleStripAdjacency = 0xD, + + InvalidEnum = 0xE, + EnumCount = 0xE, +}; + +template <> +constexpr PrimitiveMode FromGLenum<PrimitiveMode>(GLenum from) +{ + if (from >= static_cast<GLenum>(PrimitiveMode::EnumCount)) + { + return PrimitiveMode::InvalidEnum; + } + + return static_cast<PrimitiveMode>(from); +} + +constexpr GLenum ToGLenum(PrimitiveMode from) +{ + return static_cast<GLenum>(from); +} + +static_assert(ToGLenum(PrimitiveMode::Points) == GL_POINTS, "PrimitiveMode violation"); +static_assert(ToGLenum(PrimitiveMode::Lines) == GL_LINES, "PrimitiveMode violation"); +static_assert(ToGLenum(PrimitiveMode::LineLoop) == GL_LINE_LOOP, "PrimitiveMode violation"); +static_assert(ToGLenum(PrimitiveMode::LineStrip) == GL_LINE_STRIP, "PrimitiveMode 
violation"); +static_assert(ToGLenum(PrimitiveMode::Triangles) == GL_TRIANGLES, "PrimitiveMode violation"); +static_assert(ToGLenum(PrimitiveMode::TriangleStrip) == GL_TRIANGLE_STRIP, + "PrimitiveMode violation"); +static_assert(ToGLenum(PrimitiveMode::TriangleFan) == GL_TRIANGLE_FAN, "PrimitiveMode violation"); +static_assert(ToGLenum(PrimitiveMode::LinesAdjacency) == GL_LINES_ADJACENCY, + "PrimitiveMode violation"); +static_assert(ToGLenum(PrimitiveMode::LineStripAdjacency) == GL_LINE_STRIP_ADJACENCY, + "PrimitiveMode violation"); +static_assert(ToGLenum(PrimitiveMode::TrianglesAdjacency) == GL_TRIANGLES_ADJACENCY, + "PrimitiveMode violation"); +static_assert(ToGLenum(PrimitiveMode::TriangleStripAdjacency) == GL_TRIANGLE_STRIP_ADJACENCY, + "PrimitiveMode violation"); + +std::ostream &operator<<(std::ostream &os, PrimitiveMode value); + +enum class DrawElementsType : size_t +{ + UnsignedByte = 0, + UnsignedShort = 1, + UnsignedInt = 2, + InvalidEnum = 3, + EnumCount = 3, +}; + +template <> +constexpr DrawElementsType FromGLenum<DrawElementsType>(GLenum from) +{ + + GLenum scaled = (from - GL_UNSIGNED_BYTE); + // This code sequence generates a ROR instruction on x86/arm. We want to check if the lowest bit + // of scaled is set and if (scaled >> 1) is greater than a non-pot value. If we rotate the + // lowest bit to the hightest bit both conditions can be checked with a single test. + static_assert(sizeof(GLenum) == 4, "Update (scaled << 31) to sizeof(GLenum) * 8 - 1"); + GLenum packed = (scaled >> 1) | (scaled << 31); + + // operator ? with a simple assignment usually translates to a cmov instruction and thus avoids + // a branch. + packed = (packed >= static_cast<GLenum>(DrawElementsType::EnumCount)) + ? static_cast<GLenum>(DrawElementsType::InvalidEnum) + : packed; + + return static_cast<DrawElementsType>(packed); +} + +constexpr GLenum ToGLenum(DrawElementsType from) +{ + return ((static_cast<GLenum>(from) << 1) + GL_UNSIGNED_BYTE); +} + +#define ANGLE_VALIDATE_PACKED_ENUM(type, packed, glenum) \ + static_assert(ToGLenum(type::packed) == glenum, #type " violation"); \ + static_assert(FromGLenum<type>(glenum) == type::packed, #type " violation") + +ANGLE_VALIDATE_PACKED_ENUM(DrawElementsType, UnsignedByte, GL_UNSIGNED_BYTE); +ANGLE_VALIDATE_PACKED_ENUM(DrawElementsType, UnsignedShort, GL_UNSIGNED_SHORT); +ANGLE_VALIDATE_PACKED_ENUM(DrawElementsType, UnsignedInt, GL_UNSIGNED_INT); + +std::ostream &operator<<(std::ostream &os, DrawElementsType value); + +enum class VertexAttribType +{ + Byte = 0, // GLenum == 0x1400 + UnsignedByte = 1, // GLenum == 0x1401 + Short = 2, // GLenum == 0x1402 + UnsignedShort = 3, // GLenum == 0x1403 + Int = 4, // GLenum == 0x1404 + UnsignedInt = 5, // GLenum == 0x1405 + Float = 6, // GLenum == 0x1406 + Unused1 = 7, // GLenum == 0x1407 + Unused2 = 8, // GLenum == 0x1408 + Unused3 = 9, // GLenum == 0x1409 + Unused4 = 10, // GLenum == 0x140A + HalfFloat = 11, // GLenum == 0x140B + Fixed = 12, // GLenum == 0x140C + MaxBasicType = 12, + UnsignedInt2101010 = 13, // GLenum == 0x8368 + Int2101010 = 14, // GLenum == 0x8D9F + InvalidEnum = 15, + EnumCount = 15, +}; + +template <> +constexpr VertexAttribType FromGLenum<VertexAttribType>(GLenum from) +{ + GLenum packed = from - GL_BYTE; + if (packed <= static_cast<GLenum>(VertexAttribType::MaxBasicType)) + return static_cast<VertexAttribType>(packed); + if (from == GL_UNSIGNED_INT_2_10_10_10_REV) + return VertexAttribType::UnsignedInt2101010; + if (from == GL_INT_2_10_10_10_REV) + return VertexAttribType::Int2101010; + 
return VertexAttribType::InvalidEnum; +} + +constexpr GLenum ToGLenum(VertexAttribType from) +{ + // This could be optimized using a constexpr table. + if (from == VertexAttribType::Int2101010) + return GL_INT_2_10_10_10_REV; + if (from == VertexAttribType::UnsignedInt2101010) + return GL_UNSIGNED_INT_2_10_10_10_REV; + return static_cast<GLenum>(from) + GL_BYTE; +} + +ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Byte, GL_BYTE); +ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, UnsignedByte, GL_UNSIGNED_BYTE); +ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Short, GL_SHORT); +ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, UnsignedShort, GL_UNSIGNED_SHORT); +ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Int, GL_INT); +ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, UnsignedInt, GL_UNSIGNED_INT); +ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Float, GL_FLOAT); +ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, HalfFloat, GL_HALF_FLOAT); +ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Fixed, GL_FIXED); +ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, Int2101010, GL_INT_2_10_10_10_REV); +ANGLE_VALIDATE_PACKED_ENUM(VertexAttribType, UnsignedInt2101010, GL_UNSIGNED_INT_2_10_10_10_REV); + +std::ostream &operator<<(std::ostream &os, VertexAttribType value); +} // namespace gl + +namespace egl +{ +MessageType ErrorCodeToMessageType(EGLint errorCode); +} // namespace egl + +namespace egl_gl +{ +gl::TextureTarget EGLCubeMapTargetToCubeMapTarget(EGLenum eglTarget); +gl::TextureTarget EGLImageTargetToTextureTarget(EGLenum eglTarget); +gl::TextureType EGLTextureTargetToTextureType(EGLenum eglTarget); +} // namespace egl_gl + +#endif // COMMON_PACKEDGLENUMS_H_ diff --git a/gfx/angle/checkout/src/common/PackedGLEnums_autogen.cpp b/gfx/angle/checkout/src/common/PackedGLEnums_autogen.cpp new file mode 100644 index 0000000000..12974efc6b --- /dev/null +++ b/gfx/angle/checkout/src/common/PackedGLEnums_autogen.cpp @@ -0,0 +1,2285 @@ +// GENERATED FILE - DO NOT EDIT. +// Generated by gen_packed_gl_enums.py using data from packed_gl_enums.json. +// +// Copyright 2019 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// PackedGLEnums_autogen.cpp: +// Implements ANGLE-specific enums classes for GLenums and functions operating +// on them. 
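// Illustrative sketch (not part of the upstream diff): the generated mappings in this
// file are typically used to validate and repack raw GLenums. IsKnownBufferTarget is a
// hypothetical helper, not an ANGLE API; it assumes common/PackedGLEnums_autogen.h.
inline bool IsKnownBufferTarget(GLenum target)
{
    // FromGLenum returns InvalidEnum for any GLenum that has no packed equivalent.
    return gl::FromGLenum<gl::BufferBinding>(target) != gl::BufferBinding::InvalidEnum;
}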
+ +#include "common/PackedGLEnums_autogen.h" +#include "common/debug.h" + +namespace gl +{ + +template <> +AlphaTestFunc FromGLenum<AlphaTestFunc>(GLenum from) +{ + switch (from) + { + case GL_ALWAYS: + return AlphaTestFunc::AlwaysPass; + case GL_EQUAL: + return AlphaTestFunc::Equal; + case GL_GEQUAL: + return AlphaTestFunc::Gequal; + case GL_GREATER: + return AlphaTestFunc::Greater; + case GL_LEQUAL: + return AlphaTestFunc::Lequal; + case GL_LESS: + return AlphaTestFunc::Less; + case GL_NEVER: + return AlphaTestFunc::Never; + case GL_NOTEQUAL: + return AlphaTestFunc::NotEqual; + default: + return AlphaTestFunc::InvalidEnum; + } +} + +GLenum ToGLenum(AlphaTestFunc from) +{ + switch (from) + { + case AlphaTestFunc::AlwaysPass: + return GL_ALWAYS; + case AlphaTestFunc::Equal: + return GL_EQUAL; + case AlphaTestFunc::Gequal: + return GL_GEQUAL; + case AlphaTestFunc::Greater: + return GL_GREATER; + case AlphaTestFunc::Lequal: + return GL_LEQUAL; + case AlphaTestFunc::Less: + return GL_LESS; + case AlphaTestFunc::Never: + return GL_NEVER; + case AlphaTestFunc::NotEqual: + return GL_NOTEQUAL; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, AlphaTestFunc value) +{ + switch (value) + { + case AlphaTestFunc::AlwaysPass: + os << "GL_ALWAYS"; + break; + case AlphaTestFunc::Equal: + os << "GL_EQUAL"; + break; + case AlphaTestFunc::Gequal: + os << "GL_GEQUAL"; + break; + case AlphaTestFunc::Greater: + os << "GL_GREATER"; + break; + case AlphaTestFunc::Lequal: + os << "GL_LEQUAL"; + break; + case AlphaTestFunc::Less: + os << "GL_LESS"; + break; + case AlphaTestFunc::Never: + os << "GL_NEVER"; + break; + case AlphaTestFunc::NotEqual: + os << "GL_NOTEQUAL"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +BufferBinding FromGLenum<BufferBinding>(GLenum from) +{ + switch (from) + { + case GL_ARRAY_BUFFER: + return BufferBinding::Array; + case GL_ATOMIC_COUNTER_BUFFER: + return BufferBinding::AtomicCounter; + case GL_COPY_READ_BUFFER: + return BufferBinding::CopyRead; + case GL_COPY_WRITE_BUFFER: + return BufferBinding::CopyWrite; + case GL_DISPATCH_INDIRECT_BUFFER: + return BufferBinding::DispatchIndirect; + case GL_DRAW_INDIRECT_BUFFER: + return BufferBinding::DrawIndirect; + case GL_ELEMENT_ARRAY_BUFFER: + return BufferBinding::ElementArray; + case GL_PIXEL_PACK_BUFFER: + return BufferBinding::PixelPack; + case GL_PIXEL_UNPACK_BUFFER: + return BufferBinding::PixelUnpack; + case GL_SHADER_STORAGE_BUFFER: + return BufferBinding::ShaderStorage; + case GL_TRANSFORM_FEEDBACK_BUFFER: + return BufferBinding::TransformFeedback; + case GL_UNIFORM_BUFFER: + return BufferBinding::Uniform; + default: + return BufferBinding::InvalidEnum; + } +} + +GLenum ToGLenum(BufferBinding from) +{ + switch (from) + { + case BufferBinding::Array: + return GL_ARRAY_BUFFER; + case BufferBinding::AtomicCounter: + return GL_ATOMIC_COUNTER_BUFFER; + case BufferBinding::CopyRead: + return GL_COPY_READ_BUFFER; + case BufferBinding::CopyWrite: + return GL_COPY_WRITE_BUFFER; + case BufferBinding::DispatchIndirect: + return GL_DISPATCH_INDIRECT_BUFFER; + case BufferBinding::DrawIndirect: + return GL_DRAW_INDIRECT_BUFFER; + case BufferBinding::ElementArray: + return GL_ELEMENT_ARRAY_BUFFER; + case BufferBinding::PixelPack: + return GL_PIXEL_PACK_BUFFER; + case BufferBinding::PixelUnpack: + return GL_PIXEL_UNPACK_BUFFER; + case BufferBinding::ShaderStorage: + return GL_SHADER_STORAGE_BUFFER; + case BufferBinding::TransformFeedback: + return 
GL_TRANSFORM_FEEDBACK_BUFFER; + case BufferBinding::Uniform: + return GL_UNIFORM_BUFFER; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, BufferBinding value) +{ + switch (value) + { + case BufferBinding::Array: + os << "GL_ARRAY_BUFFER"; + break; + case BufferBinding::AtomicCounter: + os << "GL_ATOMIC_COUNTER_BUFFER"; + break; + case BufferBinding::CopyRead: + os << "GL_COPY_READ_BUFFER"; + break; + case BufferBinding::CopyWrite: + os << "GL_COPY_WRITE_BUFFER"; + break; + case BufferBinding::DispatchIndirect: + os << "GL_DISPATCH_INDIRECT_BUFFER"; + break; + case BufferBinding::DrawIndirect: + os << "GL_DRAW_INDIRECT_BUFFER"; + break; + case BufferBinding::ElementArray: + os << "GL_ELEMENT_ARRAY_BUFFER"; + break; + case BufferBinding::PixelPack: + os << "GL_PIXEL_PACK_BUFFER"; + break; + case BufferBinding::PixelUnpack: + os << "GL_PIXEL_UNPACK_BUFFER"; + break; + case BufferBinding::ShaderStorage: + os << "GL_SHADER_STORAGE_BUFFER"; + break; + case BufferBinding::TransformFeedback: + os << "GL_TRANSFORM_FEEDBACK_BUFFER"; + break; + case BufferBinding::Uniform: + os << "GL_UNIFORM_BUFFER"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +BufferUsage FromGLenum<BufferUsage>(GLenum from) +{ + switch (from) + { + case GL_DYNAMIC_COPY: + return BufferUsage::DynamicCopy; + case GL_DYNAMIC_DRAW: + return BufferUsage::DynamicDraw; + case GL_DYNAMIC_READ: + return BufferUsage::DynamicRead; + case GL_STATIC_COPY: + return BufferUsage::StaticCopy; + case GL_STATIC_DRAW: + return BufferUsage::StaticDraw; + case GL_STATIC_READ: + return BufferUsage::StaticRead; + case GL_STREAM_COPY: + return BufferUsage::StreamCopy; + case GL_STREAM_DRAW: + return BufferUsage::StreamDraw; + case GL_STREAM_READ: + return BufferUsage::StreamRead; + default: + return BufferUsage::InvalidEnum; + } +} + +GLenum ToGLenum(BufferUsage from) +{ + switch (from) + { + case BufferUsage::DynamicCopy: + return GL_DYNAMIC_COPY; + case BufferUsage::DynamicDraw: + return GL_DYNAMIC_DRAW; + case BufferUsage::DynamicRead: + return GL_DYNAMIC_READ; + case BufferUsage::StaticCopy: + return GL_STATIC_COPY; + case BufferUsage::StaticDraw: + return GL_STATIC_DRAW; + case BufferUsage::StaticRead: + return GL_STATIC_READ; + case BufferUsage::StreamCopy: + return GL_STREAM_COPY; + case BufferUsage::StreamDraw: + return GL_STREAM_DRAW; + case BufferUsage::StreamRead: + return GL_STREAM_READ; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, BufferUsage value) +{ + switch (value) + { + case BufferUsage::DynamicCopy: + os << "GL_DYNAMIC_COPY"; + break; + case BufferUsage::DynamicDraw: + os << "GL_DYNAMIC_DRAW"; + break; + case BufferUsage::DynamicRead: + os << "GL_DYNAMIC_READ"; + break; + case BufferUsage::StaticCopy: + os << "GL_STATIC_COPY"; + break; + case BufferUsage::StaticDraw: + os << "GL_STATIC_DRAW"; + break; + case BufferUsage::StaticRead: + os << "GL_STATIC_READ"; + break; + case BufferUsage::StreamCopy: + os << "GL_STREAM_COPY"; + break; + case BufferUsage::StreamDraw: + os << "GL_STREAM_DRAW"; + break; + case BufferUsage::StreamRead: + os << "GL_STREAM_READ"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +ClientVertexArrayType FromGLenum<ClientVertexArrayType>(GLenum from) +{ + switch (from) + { + case GL_COLOR_ARRAY: + return ClientVertexArrayType::Color; + case GL_NORMAL_ARRAY: + return ClientVertexArrayType::Normal; + case GL_POINT_SIZE_ARRAY_OES: + return 
ClientVertexArrayType::PointSize; + case GL_TEXTURE_COORD_ARRAY: + return ClientVertexArrayType::TextureCoord; + case GL_VERTEX_ARRAY: + return ClientVertexArrayType::Vertex; + default: + return ClientVertexArrayType::InvalidEnum; + } +} + +GLenum ToGLenum(ClientVertexArrayType from) +{ + switch (from) + { + case ClientVertexArrayType::Color: + return GL_COLOR_ARRAY; + case ClientVertexArrayType::Normal: + return GL_NORMAL_ARRAY; + case ClientVertexArrayType::PointSize: + return GL_POINT_SIZE_ARRAY_OES; + case ClientVertexArrayType::TextureCoord: + return GL_TEXTURE_COORD_ARRAY; + case ClientVertexArrayType::Vertex: + return GL_VERTEX_ARRAY; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, ClientVertexArrayType value) +{ + switch (value) + { + case ClientVertexArrayType::Color: + os << "GL_COLOR_ARRAY"; + break; + case ClientVertexArrayType::Normal: + os << "GL_NORMAL_ARRAY"; + break; + case ClientVertexArrayType::PointSize: + os << "GL_POINT_SIZE_ARRAY_OES"; + break; + case ClientVertexArrayType::TextureCoord: + os << "GL_TEXTURE_COORD_ARRAY"; + break; + case ClientVertexArrayType::Vertex: + os << "GL_VERTEX_ARRAY"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +CullFaceMode FromGLenum<CullFaceMode>(GLenum from) +{ + switch (from) + { + case GL_BACK: + return CullFaceMode::Back; + case GL_FRONT: + return CullFaceMode::Front; + case GL_FRONT_AND_BACK: + return CullFaceMode::FrontAndBack; + default: + return CullFaceMode::InvalidEnum; + } +} + +GLenum ToGLenum(CullFaceMode from) +{ + switch (from) + { + case CullFaceMode::Back: + return GL_BACK; + case CullFaceMode::Front: + return GL_FRONT; + case CullFaceMode::FrontAndBack: + return GL_FRONT_AND_BACK; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, CullFaceMode value) +{ + switch (value) + { + case CullFaceMode::Back: + os << "GL_BACK"; + break; + case CullFaceMode::Front: + os << "GL_FRONT"; + break; + case CullFaceMode::FrontAndBack: + os << "GL_FRONT_AND_BACK"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +FilterMode FromGLenum<FilterMode>(GLenum from) +{ + switch (from) + { + case GL_NEAREST: + return FilterMode::Nearest; + case GL_LINEAR: + return FilterMode::Linear; + case GL_NEAREST_MIPMAP_NEAREST: + return FilterMode::NearestMipmapNearest; + case GL_NEAREST_MIPMAP_LINEAR: + return FilterMode::NearestMipmapLinear; + case GL_LINEAR_MIPMAP_LINEAR: + return FilterMode::LinearMipmapLinear; + default: + return FilterMode::InvalidEnum; + } +} + +GLenum ToGLenum(FilterMode from) +{ + switch (from) + { + case FilterMode::Nearest: + return GL_NEAREST; + case FilterMode::Linear: + return GL_LINEAR; + case FilterMode::NearestMipmapNearest: + return GL_NEAREST_MIPMAP_NEAREST; + case FilterMode::NearestMipmapLinear: + return GL_NEAREST_MIPMAP_LINEAR; + case FilterMode::LinearMipmapLinear: + return GL_LINEAR_MIPMAP_LINEAR; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, FilterMode value) +{ + switch (value) + { + case FilterMode::Nearest: + os << "GL_NEAREST"; + break; + case FilterMode::Linear: + os << "GL_LINEAR"; + break; + case FilterMode::NearestMipmapNearest: + os << "GL_NEAREST_MIPMAP_NEAREST"; + break; + case FilterMode::NearestMipmapLinear: + os << "GL_NEAREST_MIPMAP_LINEAR"; + break; + case FilterMode::LinearMipmapLinear: + os << "GL_LINEAR_MIPMAP_LINEAR"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } 
+ return os; +} + +template <> +FogMode FromGLenum<FogMode>(GLenum from) +{ + switch (from) + { + case GL_EXP: + return FogMode::Exp; + case GL_EXP2: + return FogMode::Exp2; + case GL_LINEAR: + return FogMode::Linear; + default: + return FogMode::InvalidEnum; + } +} + +GLenum ToGLenum(FogMode from) +{ + switch (from) + { + case FogMode::Exp: + return GL_EXP; + case FogMode::Exp2: + return GL_EXP2; + case FogMode::Linear: + return GL_LINEAR; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, FogMode value) +{ + switch (value) + { + case FogMode::Exp: + os << "GL_EXP"; + break; + case FogMode::Exp2: + os << "GL_EXP2"; + break; + case FogMode::Linear: + os << "GL_LINEAR"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +GraphicsResetStatus FromGLenum<GraphicsResetStatus>(GLenum from) +{ + switch (from) + { + case GL_NO_ERROR: + return GraphicsResetStatus::NoError; + case GL_GUILTY_CONTEXT_RESET: + return GraphicsResetStatus::GuiltyContextReset; + case GL_INNOCENT_CONTEXT_RESET: + return GraphicsResetStatus::InnocentContextReset; + case GL_UNKNOWN_CONTEXT_RESET: + return GraphicsResetStatus::UnknownContextReset; + default: + return GraphicsResetStatus::InvalidEnum; + } +} + +GLenum ToGLenum(GraphicsResetStatus from) +{ + switch (from) + { + case GraphicsResetStatus::NoError: + return GL_NO_ERROR; + case GraphicsResetStatus::GuiltyContextReset: + return GL_GUILTY_CONTEXT_RESET; + case GraphicsResetStatus::InnocentContextReset: + return GL_INNOCENT_CONTEXT_RESET; + case GraphicsResetStatus::UnknownContextReset: + return GL_UNKNOWN_CONTEXT_RESET; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, GraphicsResetStatus value) +{ + switch (value) + { + case GraphicsResetStatus::NoError: + os << "GL_NO_ERROR"; + break; + case GraphicsResetStatus::GuiltyContextReset: + os << "GL_GUILTY_CONTEXT_RESET"; + break; + case GraphicsResetStatus::InnocentContextReset: + os << "GL_INNOCENT_CONTEXT_RESET"; + break; + case GraphicsResetStatus::UnknownContextReset: + os << "GL_UNKNOWN_CONTEXT_RESET"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +HandleType FromGLenum<HandleType>(GLenum from) +{ + switch (from) + { + case GL_HANDLE_TYPE_OPAQUE_FD_EXT: + return HandleType::OpaqueFd; + default: + return HandleType::InvalidEnum; + } +} + +GLenum ToGLenum(HandleType from) +{ + switch (from) + { + case HandleType::OpaqueFd: + return GL_HANDLE_TYPE_OPAQUE_FD_EXT; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, HandleType value) +{ + switch (value) + { + case HandleType::OpaqueFd: + os << "GL_HANDLE_TYPE_OPAQUE_FD_EXT"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +HintSetting FromGLenum<HintSetting>(GLenum from) +{ + switch (from) + { + case GL_DONT_CARE: + return HintSetting::DontCare; + case GL_FASTEST: + return HintSetting::Fastest; + case GL_NICEST: + return HintSetting::Nicest; + default: + return HintSetting::InvalidEnum; + } +} + +GLenum ToGLenum(HintSetting from) +{ + switch (from) + { + case HintSetting::DontCare: + return GL_DONT_CARE; + case HintSetting::Fastest: + return GL_FASTEST; + case HintSetting::Nicest: + return GL_NICEST; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, HintSetting value) +{ + switch (value) + { + case HintSetting::DontCare: + os << "GL_DONT_CARE"; + break; + case 
HintSetting::Fastest: + os << "GL_FASTEST"; + break; + case HintSetting::Nicest: + os << "GL_NICEST"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +ImageLayout FromGLenum<ImageLayout>(GLenum from) +{ + switch (from) + { + case GL_NONE: + return ImageLayout::Undefined; + case GL_LAYOUT_GENERAL_EXT: + return ImageLayout::General; + case GL_LAYOUT_COLOR_ATTACHMENT_EXT: + return ImageLayout::ColorAttachment; + case GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT: + return ImageLayout::DepthStencilAttachment; + case GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT: + return ImageLayout::DepthStencilReadOnlyAttachment; + case GL_LAYOUT_SHADER_READ_ONLY_EXT: + return ImageLayout::ShaderReadOnly; + case GL_LAYOUT_TRANSFER_SRC_EXT: + return ImageLayout::TransferSrc; + case GL_LAYOUT_TRANSFER_DST_EXT: + return ImageLayout::TransferDst; + case GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT: + return ImageLayout::DepthReadOnlyStencilAttachment; + case GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT: + return ImageLayout::DepthAttachmentStencilReadOnly; + default: + return ImageLayout::InvalidEnum; + } +} + +GLenum ToGLenum(ImageLayout from) +{ + switch (from) + { + case ImageLayout::Undefined: + return GL_NONE; + case ImageLayout::General: + return GL_LAYOUT_GENERAL_EXT; + case ImageLayout::ColorAttachment: + return GL_LAYOUT_COLOR_ATTACHMENT_EXT; + case ImageLayout::DepthStencilAttachment: + return GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT; + case ImageLayout::DepthStencilReadOnlyAttachment: + return GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT; + case ImageLayout::ShaderReadOnly: + return GL_LAYOUT_SHADER_READ_ONLY_EXT; + case ImageLayout::TransferSrc: + return GL_LAYOUT_TRANSFER_SRC_EXT; + case ImageLayout::TransferDst: + return GL_LAYOUT_TRANSFER_DST_EXT; + case ImageLayout::DepthReadOnlyStencilAttachment: + return GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT; + case ImageLayout::DepthAttachmentStencilReadOnly: + return GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, ImageLayout value) +{ + switch (value) + { + case ImageLayout::Undefined: + os << "GL_NONE"; + break; + case ImageLayout::General: + os << "GL_LAYOUT_GENERAL_EXT"; + break; + case ImageLayout::ColorAttachment: + os << "GL_LAYOUT_COLOR_ATTACHMENT_EXT"; + break; + case ImageLayout::DepthStencilAttachment: + os << "GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT"; + break; + case ImageLayout::DepthStencilReadOnlyAttachment: + os << "GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT"; + break; + case ImageLayout::ShaderReadOnly: + os << "GL_LAYOUT_SHADER_READ_ONLY_EXT"; + break; + case ImageLayout::TransferSrc: + os << "GL_LAYOUT_TRANSFER_SRC_EXT"; + break; + case ImageLayout::TransferDst: + os << "GL_LAYOUT_TRANSFER_DST_EXT"; + break; + case ImageLayout::DepthReadOnlyStencilAttachment: + os << "GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT"; + break; + case ImageLayout::DepthAttachmentStencilReadOnly: + os << "GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +LightParameter FromGLenum<LightParameter>(GLenum from) +{ + switch (from) + { + case GL_AMBIENT: + return LightParameter::Ambient; + case GL_AMBIENT_AND_DIFFUSE: + return LightParameter::AmbientAndDiffuse; + case GL_CONSTANT_ATTENUATION: + return LightParameter::ConstantAttenuation; + case GL_DIFFUSE: + return LightParameter::Diffuse; + case GL_LINEAR_ATTENUATION: + return 
LightParameter::LinearAttenuation; + case GL_POSITION: + return LightParameter::Position; + case GL_QUADRATIC_ATTENUATION: + return LightParameter::QuadraticAttenuation; + case GL_SPECULAR: + return LightParameter::Specular; + case GL_SPOT_CUTOFF: + return LightParameter::SpotCutoff; + case GL_SPOT_DIRECTION: + return LightParameter::SpotDirection; + case GL_SPOT_EXPONENT: + return LightParameter::SpotExponent; + default: + return LightParameter::InvalidEnum; + } +} + +GLenum ToGLenum(LightParameter from) +{ + switch (from) + { + case LightParameter::Ambient: + return GL_AMBIENT; + case LightParameter::AmbientAndDiffuse: + return GL_AMBIENT_AND_DIFFUSE; + case LightParameter::ConstantAttenuation: + return GL_CONSTANT_ATTENUATION; + case LightParameter::Diffuse: + return GL_DIFFUSE; + case LightParameter::LinearAttenuation: + return GL_LINEAR_ATTENUATION; + case LightParameter::Position: + return GL_POSITION; + case LightParameter::QuadraticAttenuation: + return GL_QUADRATIC_ATTENUATION; + case LightParameter::Specular: + return GL_SPECULAR; + case LightParameter::SpotCutoff: + return GL_SPOT_CUTOFF; + case LightParameter::SpotDirection: + return GL_SPOT_DIRECTION; + case LightParameter::SpotExponent: + return GL_SPOT_EXPONENT; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, LightParameter value) +{ + switch (value) + { + case LightParameter::Ambient: + os << "GL_AMBIENT"; + break; + case LightParameter::AmbientAndDiffuse: + os << "GL_AMBIENT_AND_DIFFUSE"; + break; + case LightParameter::ConstantAttenuation: + os << "GL_CONSTANT_ATTENUATION"; + break; + case LightParameter::Diffuse: + os << "GL_DIFFUSE"; + break; + case LightParameter::LinearAttenuation: + os << "GL_LINEAR_ATTENUATION"; + break; + case LightParameter::Position: + os << "GL_POSITION"; + break; + case LightParameter::QuadraticAttenuation: + os << "GL_QUADRATIC_ATTENUATION"; + break; + case LightParameter::Specular: + os << "GL_SPECULAR"; + break; + case LightParameter::SpotCutoff: + os << "GL_SPOT_CUTOFF"; + break; + case LightParameter::SpotDirection: + os << "GL_SPOT_DIRECTION"; + break; + case LightParameter::SpotExponent: + os << "GL_SPOT_EXPONENT"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +LogicalOperation FromGLenum<LogicalOperation>(GLenum from) +{ + switch (from) + { + case GL_AND: + return LogicalOperation::And; + case GL_AND_INVERTED: + return LogicalOperation::AndInverted; + case GL_AND_REVERSE: + return LogicalOperation::AndReverse; + case GL_CLEAR: + return LogicalOperation::Clear; + case GL_COPY: + return LogicalOperation::Copy; + case GL_COPY_INVERTED: + return LogicalOperation::CopyInverted; + case GL_EQUIV: + return LogicalOperation::Equiv; + case GL_INVERT: + return LogicalOperation::Invert; + case GL_NAND: + return LogicalOperation::Nand; + case GL_NOOP: + return LogicalOperation::Noop; + case GL_NOR: + return LogicalOperation::Nor; + case GL_OR: + return LogicalOperation::Or; + case GL_OR_INVERTED: + return LogicalOperation::OrInverted; + case GL_OR_REVERSE: + return LogicalOperation::OrReverse; + case GL_SET: + return LogicalOperation::Set; + case GL_XOR: + return LogicalOperation::Xor; + default: + return LogicalOperation::InvalidEnum; + } +} + +GLenum ToGLenum(LogicalOperation from) +{ + switch (from) + { + case LogicalOperation::And: + return GL_AND; + case LogicalOperation::AndInverted: + return GL_AND_INVERTED; + case LogicalOperation::AndReverse: + return GL_AND_REVERSE; + case LogicalOperation::Clear: + 
return GL_CLEAR; + case LogicalOperation::Copy: + return GL_COPY; + case LogicalOperation::CopyInverted: + return GL_COPY_INVERTED; + case LogicalOperation::Equiv: + return GL_EQUIV; + case LogicalOperation::Invert: + return GL_INVERT; + case LogicalOperation::Nand: + return GL_NAND; + case LogicalOperation::Noop: + return GL_NOOP; + case LogicalOperation::Nor: + return GL_NOR; + case LogicalOperation::Or: + return GL_OR; + case LogicalOperation::OrInverted: + return GL_OR_INVERTED; + case LogicalOperation::OrReverse: + return GL_OR_REVERSE; + case LogicalOperation::Set: + return GL_SET; + case LogicalOperation::Xor: + return GL_XOR; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, LogicalOperation value) +{ + switch (value) + { + case LogicalOperation::And: + os << "GL_AND"; + break; + case LogicalOperation::AndInverted: + os << "GL_AND_INVERTED"; + break; + case LogicalOperation::AndReverse: + os << "GL_AND_REVERSE"; + break; + case LogicalOperation::Clear: + os << "GL_CLEAR"; + break; + case LogicalOperation::Copy: + os << "GL_COPY"; + break; + case LogicalOperation::CopyInverted: + os << "GL_COPY_INVERTED"; + break; + case LogicalOperation::Equiv: + os << "GL_EQUIV"; + break; + case LogicalOperation::Invert: + os << "GL_INVERT"; + break; + case LogicalOperation::Nand: + os << "GL_NAND"; + break; + case LogicalOperation::Noop: + os << "GL_NOOP"; + break; + case LogicalOperation::Nor: + os << "GL_NOR"; + break; + case LogicalOperation::Or: + os << "GL_OR"; + break; + case LogicalOperation::OrInverted: + os << "GL_OR_INVERTED"; + break; + case LogicalOperation::OrReverse: + os << "GL_OR_REVERSE"; + break; + case LogicalOperation::Set: + os << "GL_SET"; + break; + case LogicalOperation::Xor: + os << "GL_XOR"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +MaterialParameter FromGLenum<MaterialParameter>(GLenum from) +{ + switch (from) + { + case GL_AMBIENT: + return MaterialParameter::Ambient; + case GL_AMBIENT_AND_DIFFUSE: + return MaterialParameter::AmbientAndDiffuse; + case GL_DIFFUSE: + return MaterialParameter::Diffuse; + case GL_EMISSION: + return MaterialParameter::Emission; + case GL_SHININESS: + return MaterialParameter::Shininess; + case GL_SPECULAR: + return MaterialParameter::Specular; + default: + return MaterialParameter::InvalidEnum; + } +} + +GLenum ToGLenum(MaterialParameter from) +{ + switch (from) + { + case MaterialParameter::Ambient: + return GL_AMBIENT; + case MaterialParameter::AmbientAndDiffuse: + return GL_AMBIENT_AND_DIFFUSE; + case MaterialParameter::Diffuse: + return GL_DIFFUSE; + case MaterialParameter::Emission: + return GL_EMISSION; + case MaterialParameter::Shininess: + return GL_SHININESS; + case MaterialParameter::Specular: + return GL_SPECULAR; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, MaterialParameter value) +{ + switch (value) + { + case MaterialParameter::Ambient: + os << "GL_AMBIENT"; + break; + case MaterialParameter::AmbientAndDiffuse: + os << "GL_AMBIENT_AND_DIFFUSE"; + break; + case MaterialParameter::Diffuse: + os << "GL_DIFFUSE"; + break; + case MaterialParameter::Emission: + os << "GL_EMISSION"; + break; + case MaterialParameter::Shininess: + os << "GL_SHININESS"; + break; + case MaterialParameter::Specular: + os << "GL_SPECULAR"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +MatrixType FromGLenum<MatrixType>(GLenum from) +{ + switch (from) + { + case 
GL_MODELVIEW: + return MatrixType::Modelview; + case GL_PROJECTION: + return MatrixType::Projection; + case GL_TEXTURE: + return MatrixType::Texture; + default: + return MatrixType::InvalidEnum; + } +} + +GLenum ToGLenum(MatrixType from) +{ + switch (from) + { + case MatrixType::Modelview: + return GL_MODELVIEW; + case MatrixType::Projection: + return GL_PROJECTION; + case MatrixType::Texture: + return GL_TEXTURE; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, MatrixType value) +{ + switch (value) + { + case MatrixType::Modelview: + os << "GL_MODELVIEW"; + break; + case MatrixType::Projection: + os << "GL_PROJECTION"; + break; + case MatrixType::Texture: + os << "GL_TEXTURE"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +PointParameter FromGLenum<PointParameter>(GLenum from) +{ + switch (from) + { + case GL_POINT_SIZE_MIN: + return PointParameter::PointSizeMin; + case GL_POINT_SIZE_MAX: + return PointParameter::PointSizeMax; + case GL_POINT_FADE_THRESHOLD_SIZE: + return PointParameter::PointFadeThresholdSize; + case GL_POINT_DISTANCE_ATTENUATION: + return PointParameter::PointDistanceAttenuation; + default: + return PointParameter::InvalidEnum; + } +} + +GLenum ToGLenum(PointParameter from) +{ + switch (from) + { + case PointParameter::PointSizeMin: + return GL_POINT_SIZE_MIN; + case PointParameter::PointSizeMax: + return GL_POINT_SIZE_MAX; + case PointParameter::PointFadeThresholdSize: + return GL_POINT_FADE_THRESHOLD_SIZE; + case PointParameter::PointDistanceAttenuation: + return GL_POINT_DISTANCE_ATTENUATION; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, PointParameter value) +{ + switch (value) + { + case PointParameter::PointSizeMin: + os << "GL_POINT_SIZE_MIN"; + break; + case PointParameter::PointSizeMax: + os << "GL_POINT_SIZE_MAX"; + break; + case PointParameter::PointFadeThresholdSize: + os << "GL_POINT_FADE_THRESHOLD_SIZE"; + break; + case PointParameter::PointDistanceAttenuation: + os << "GL_POINT_DISTANCE_ATTENUATION"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +ProvokingVertexConvention FromGLenum<ProvokingVertexConvention>(GLenum from) +{ + switch (from) + { + case GL_FIRST_VERTEX_CONVENTION: + return ProvokingVertexConvention::FirstVertexConvention; + case GL_LAST_VERTEX_CONVENTION: + return ProvokingVertexConvention::LastVertexConvention; + default: + return ProvokingVertexConvention::InvalidEnum; + } +} + +GLenum ToGLenum(ProvokingVertexConvention from) +{ + switch (from) + { + case ProvokingVertexConvention::FirstVertexConvention: + return GL_FIRST_VERTEX_CONVENTION; + case ProvokingVertexConvention::LastVertexConvention: + return GL_LAST_VERTEX_CONVENTION; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, ProvokingVertexConvention value) +{ + switch (value) + { + case ProvokingVertexConvention::FirstVertexConvention: + os << "GL_FIRST_VERTEX_CONVENTION"; + break; + case ProvokingVertexConvention::LastVertexConvention: + os << "GL_LAST_VERTEX_CONVENTION"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +QueryType FromGLenum<QueryType>(GLenum from) +{ + switch (from) + { + case GL_ANY_SAMPLES_PASSED: + return QueryType::AnySamples; + case GL_ANY_SAMPLES_PASSED_CONSERVATIVE: + return QueryType::AnySamplesConservative; + case GL_COMMANDS_COMPLETED_CHROMIUM: + return QueryType::CommandsCompleted; + case 
GL_PRIMITIVES_GENERATED_EXT: + return QueryType::PrimitivesGenerated; + case GL_TIME_ELAPSED_EXT: + return QueryType::TimeElapsed; + case GL_TIMESTAMP_EXT: + return QueryType::Timestamp; + case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN: + return QueryType::TransformFeedbackPrimitivesWritten; + default: + return QueryType::InvalidEnum; + } +} + +GLenum ToGLenum(QueryType from) +{ + switch (from) + { + case QueryType::AnySamples: + return GL_ANY_SAMPLES_PASSED; + case QueryType::AnySamplesConservative: + return GL_ANY_SAMPLES_PASSED_CONSERVATIVE; + case QueryType::CommandsCompleted: + return GL_COMMANDS_COMPLETED_CHROMIUM; + case QueryType::PrimitivesGenerated: + return GL_PRIMITIVES_GENERATED_EXT; + case QueryType::TimeElapsed: + return GL_TIME_ELAPSED_EXT; + case QueryType::Timestamp: + return GL_TIMESTAMP_EXT; + case QueryType::TransformFeedbackPrimitivesWritten: + return GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, QueryType value) +{ + switch (value) + { + case QueryType::AnySamples: + os << "GL_ANY_SAMPLES_PASSED"; + break; + case QueryType::AnySamplesConservative: + os << "GL_ANY_SAMPLES_PASSED_CONSERVATIVE"; + break; + case QueryType::CommandsCompleted: + os << "GL_COMMANDS_COMPLETED_CHROMIUM"; + break; + case QueryType::PrimitivesGenerated: + os << "GL_PRIMITIVES_GENERATED_EXT"; + break; + case QueryType::TimeElapsed: + os << "GL_TIME_ELAPSED_EXT"; + break; + case QueryType::Timestamp: + os << "GL_TIMESTAMP_EXT"; + break; + case QueryType::TransformFeedbackPrimitivesWritten: + os << "GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +ShaderType FromGLenum<ShaderType>(GLenum from) +{ + switch (from) + { + case GL_VERTEX_SHADER: + return ShaderType::Vertex; + case GL_FRAGMENT_SHADER: + return ShaderType::Fragment; + case GL_GEOMETRY_SHADER_EXT: + return ShaderType::Geometry; + case GL_COMPUTE_SHADER: + return ShaderType::Compute; + default: + return ShaderType::InvalidEnum; + } +} + +GLenum ToGLenum(ShaderType from) +{ + switch (from) + { + case ShaderType::Vertex: + return GL_VERTEX_SHADER; + case ShaderType::Fragment: + return GL_FRAGMENT_SHADER; + case ShaderType::Geometry: + return GL_GEOMETRY_SHADER_EXT; + case ShaderType::Compute: + return GL_COMPUTE_SHADER; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, ShaderType value) +{ + switch (value) + { + case ShaderType::Vertex: + os << "GL_VERTEX_SHADER"; + break; + case ShaderType::Fragment: + os << "GL_FRAGMENT_SHADER"; + break; + case ShaderType::Geometry: + os << "GL_GEOMETRY_SHADER_EXT"; + break; + case ShaderType::Compute: + os << "GL_COMPUTE_SHADER"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +ShadingModel FromGLenum<ShadingModel>(GLenum from) +{ + switch (from) + { + case GL_FLAT: + return ShadingModel::Flat; + case GL_SMOOTH: + return ShadingModel::Smooth; + default: + return ShadingModel::InvalidEnum; + } +} + +GLenum ToGLenum(ShadingModel from) +{ + switch (from) + { + case ShadingModel::Flat: + return GL_FLAT; + case ShadingModel::Smooth: + return GL_SMOOTH; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, ShadingModel value) +{ + switch (value) + { + case ShadingModel::Flat: + os << "GL_FLAT"; + break; + case ShadingModel::Smooth: + os << "GL_SMOOTH"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + 
return os; +} + +template <> +TextureCombine FromGLenum<TextureCombine>(GLenum from) +{ + switch (from) + { + case GL_ADD: + return TextureCombine::Add; + case GL_ADD_SIGNED: + return TextureCombine::AddSigned; + case GL_DOT3_RGB: + return TextureCombine::Dot3Rgb; + case GL_DOT3_RGBA: + return TextureCombine::Dot3Rgba; + case GL_INTERPOLATE: + return TextureCombine::Interpolate; + case GL_MODULATE: + return TextureCombine::Modulate; + case GL_REPLACE: + return TextureCombine::Replace; + case GL_SUBTRACT: + return TextureCombine::Subtract; + default: + return TextureCombine::InvalidEnum; + } +} + +GLenum ToGLenum(TextureCombine from) +{ + switch (from) + { + case TextureCombine::Add: + return GL_ADD; + case TextureCombine::AddSigned: + return GL_ADD_SIGNED; + case TextureCombine::Dot3Rgb: + return GL_DOT3_RGB; + case TextureCombine::Dot3Rgba: + return GL_DOT3_RGBA; + case TextureCombine::Interpolate: + return GL_INTERPOLATE; + case TextureCombine::Modulate: + return GL_MODULATE; + case TextureCombine::Replace: + return GL_REPLACE; + case TextureCombine::Subtract: + return GL_SUBTRACT; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, TextureCombine value) +{ + switch (value) + { + case TextureCombine::Add: + os << "GL_ADD"; + break; + case TextureCombine::AddSigned: + os << "GL_ADD_SIGNED"; + break; + case TextureCombine::Dot3Rgb: + os << "GL_DOT3_RGB"; + break; + case TextureCombine::Dot3Rgba: + os << "GL_DOT3_RGBA"; + break; + case TextureCombine::Interpolate: + os << "GL_INTERPOLATE"; + break; + case TextureCombine::Modulate: + os << "GL_MODULATE"; + break; + case TextureCombine::Replace: + os << "GL_REPLACE"; + break; + case TextureCombine::Subtract: + os << "GL_SUBTRACT"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +TextureEnvMode FromGLenum<TextureEnvMode>(GLenum from) +{ + switch (from) + { + case GL_ADD: + return TextureEnvMode::Add; + case GL_BLEND: + return TextureEnvMode::Blend; + case GL_COMBINE: + return TextureEnvMode::Combine; + case GL_DECAL: + return TextureEnvMode::Decal; + case GL_MODULATE: + return TextureEnvMode::Modulate; + case GL_REPLACE: + return TextureEnvMode::Replace; + default: + return TextureEnvMode::InvalidEnum; + } +} + +GLenum ToGLenum(TextureEnvMode from) +{ + switch (from) + { + case TextureEnvMode::Add: + return GL_ADD; + case TextureEnvMode::Blend: + return GL_BLEND; + case TextureEnvMode::Combine: + return GL_COMBINE; + case TextureEnvMode::Decal: + return GL_DECAL; + case TextureEnvMode::Modulate: + return GL_MODULATE; + case TextureEnvMode::Replace: + return GL_REPLACE; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, TextureEnvMode value) +{ + switch (value) + { + case TextureEnvMode::Add: + os << "GL_ADD"; + break; + case TextureEnvMode::Blend: + os << "GL_BLEND"; + break; + case TextureEnvMode::Combine: + os << "GL_COMBINE"; + break; + case TextureEnvMode::Decal: + os << "GL_DECAL"; + break; + case TextureEnvMode::Modulate: + os << "GL_MODULATE"; + break; + case TextureEnvMode::Replace: + os << "GL_REPLACE"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +TextureEnvParameter FromGLenum<TextureEnvParameter>(GLenum from) +{ + switch (from) + { + case GL_TEXTURE_ENV_MODE: + return TextureEnvParameter::Mode; + case GL_TEXTURE_ENV_COLOR: + return TextureEnvParameter::Color; + case GL_COMBINE_RGB: + return TextureEnvParameter::CombineRgb; + case GL_COMBINE_ALPHA: + return 
TextureEnvParameter::CombineAlpha; + case GL_RGB_SCALE: + return TextureEnvParameter::RgbScale; + case GL_ALPHA_SCALE: + return TextureEnvParameter::AlphaScale; + case GL_SRC0_RGB: + return TextureEnvParameter::Src0Rgb; + case GL_SRC1_RGB: + return TextureEnvParameter::Src1Rgb; + case GL_SRC2_RGB: + return TextureEnvParameter::Src2Rgb; + case GL_SRC0_ALPHA: + return TextureEnvParameter::Src0Alpha; + case GL_SRC1_ALPHA: + return TextureEnvParameter::Src1Alpha; + case GL_SRC2_ALPHA: + return TextureEnvParameter::Src2Alpha; + case GL_OPERAND0_RGB: + return TextureEnvParameter::Op0Rgb; + case GL_OPERAND1_RGB: + return TextureEnvParameter::Op1Rgb; + case GL_OPERAND2_RGB: + return TextureEnvParameter::Op2Rgb; + case GL_OPERAND0_ALPHA: + return TextureEnvParameter::Op0Alpha; + case GL_OPERAND1_ALPHA: + return TextureEnvParameter::Op1Alpha; + case GL_OPERAND2_ALPHA: + return TextureEnvParameter::Op2Alpha; + case GL_COORD_REPLACE_OES: + return TextureEnvParameter::PointCoordReplace; + default: + return TextureEnvParameter::InvalidEnum; + } +} + +GLenum ToGLenum(TextureEnvParameter from) +{ + switch (from) + { + case TextureEnvParameter::Mode: + return GL_TEXTURE_ENV_MODE; + case TextureEnvParameter::Color: + return GL_TEXTURE_ENV_COLOR; + case TextureEnvParameter::CombineRgb: + return GL_COMBINE_RGB; + case TextureEnvParameter::CombineAlpha: + return GL_COMBINE_ALPHA; + case TextureEnvParameter::RgbScale: + return GL_RGB_SCALE; + case TextureEnvParameter::AlphaScale: + return GL_ALPHA_SCALE; + case TextureEnvParameter::Src0Rgb: + return GL_SRC0_RGB; + case TextureEnvParameter::Src1Rgb: + return GL_SRC1_RGB; + case TextureEnvParameter::Src2Rgb: + return GL_SRC2_RGB; + case TextureEnvParameter::Src0Alpha: + return GL_SRC0_ALPHA; + case TextureEnvParameter::Src1Alpha: + return GL_SRC1_ALPHA; + case TextureEnvParameter::Src2Alpha: + return GL_SRC2_ALPHA; + case TextureEnvParameter::Op0Rgb: + return GL_OPERAND0_RGB; + case TextureEnvParameter::Op1Rgb: + return GL_OPERAND1_RGB; + case TextureEnvParameter::Op2Rgb: + return GL_OPERAND2_RGB; + case TextureEnvParameter::Op0Alpha: + return GL_OPERAND0_ALPHA; + case TextureEnvParameter::Op1Alpha: + return GL_OPERAND1_ALPHA; + case TextureEnvParameter::Op2Alpha: + return GL_OPERAND2_ALPHA; + case TextureEnvParameter::PointCoordReplace: + return GL_COORD_REPLACE_OES; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, TextureEnvParameter value) +{ + switch (value) + { + case TextureEnvParameter::Mode: + os << "GL_TEXTURE_ENV_MODE"; + break; + case TextureEnvParameter::Color: + os << "GL_TEXTURE_ENV_COLOR"; + break; + case TextureEnvParameter::CombineRgb: + os << "GL_COMBINE_RGB"; + break; + case TextureEnvParameter::CombineAlpha: + os << "GL_COMBINE_ALPHA"; + break; + case TextureEnvParameter::RgbScale: + os << "GL_RGB_SCALE"; + break; + case TextureEnvParameter::AlphaScale: + os << "GL_ALPHA_SCALE"; + break; + case TextureEnvParameter::Src0Rgb: + os << "GL_SRC0_RGB"; + break; + case TextureEnvParameter::Src1Rgb: + os << "GL_SRC1_RGB"; + break; + case TextureEnvParameter::Src2Rgb: + os << "GL_SRC2_RGB"; + break; + case TextureEnvParameter::Src0Alpha: + os << "GL_SRC0_ALPHA"; + break; + case TextureEnvParameter::Src1Alpha: + os << "GL_SRC1_ALPHA"; + break; + case TextureEnvParameter::Src2Alpha: + os << "GL_SRC2_ALPHA"; + break; + case TextureEnvParameter::Op0Rgb: + os << "GL_OPERAND0_RGB"; + break; + case TextureEnvParameter::Op1Rgb: + os << "GL_OPERAND1_RGB"; + break; + case TextureEnvParameter::Op2Rgb: + os << 
"GL_OPERAND2_RGB"; + break; + case TextureEnvParameter::Op0Alpha: + os << "GL_OPERAND0_ALPHA"; + break; + case TextureEnvParameter::Op1Alpha: + os << "GL_OPERAND1_ALPHA"; + break; + case TextureEnvParameter::Op2Alpha: + os << "GL_OPERAND2_ALPHA"; + break; + case TextureEnvParameter::PointCoordReplace: + os << "GL_COORD_REPLACE_OES"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +TextureEnvTarget FromGLenum<TextureEnvTarget>(GLenum from) +{ + switch (from) + { + case GL_TEXTURE_ENV: + return TextureEnvTarget::Env; + case GL_POINT_SPRITE_OES: + return TextureEnvTarget::PointSprite; + default: + return TextureEnvTarget::InvalidEnum; + } +} + +GLenum ToGLenum(TextureEnvTarget from) +{ + switch (from) + { + case TextureEnvTarget::Env: + return GL_TEXTURE_ENV; + case TextureEnvTarget::PointSprite: + return GL_POINT_SPRITE_OES; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, TextureEnvTarget value) +{ + switch (value) + { + case TextureEnvTarget::Env: + os << "GL_TEXTURE_ENV"; + break; + case TextureEnvTarget::PointSprite: + os << "GL_POINT_SPRITE_OES"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +TextureOp FromGLenum<TextureOp>(GLenum from) +{ + switch (from) + { + case GL_ONE_MINUS_SRC_ALPHA: + return TextureOp::OneMinusSrcAlpha; + case GL_ONE_MINUS_SRC_COLOR: + return TextureOp::OneMinusSrcColor; + case GL_SRC_ALPHA: + return TextureOp::SrcAlpha; + case GL_SRC_COLOR: + return TextureOp::SrcColor; + default: + return TextureOp::InvalidEnum; + } +} + +GLenum ToGLenum(TextureOp from) +{ + switch (from) + { + case TextureOp::OneMinusSrcAlpha: + return GL_ONE_MINUS_SRC_ALPHA; + case TextureOp::OneMinusSrcColor: + return GL_ONE_MINUS_SRC_COLOR; + case TextureOp::SrcAlpha: + return GL_SRC_ALPHA; + case TextureOp::SrcColor: + return GL_SRC_COLOR; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, TextureOp value) +{ + switch (value) + { + case TextureOp::OneMinusSrcAlpha: + os << "GL_ONE_MINUS_SRC_ALPHA"; + break; + case TextureOp::OneMinusSrcColor: + os << "GL_ONE_MINUS_SRC_COLOR"; + break; + case TextureOp::SrcAlpha: + os << "GL_SRC_ALPHA"; + break; + case TextureOp::SrcColor: + os << "GL_SRC_COLOR"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +TextureSrc FromGLenum<TextureSrc>(GLenum from) +{ + switch (from) + { + case GL_CONSTANT: + return TextureSrc::Constant; + case GL_PREVIOUS: + return TextureSrc::Previous; + case GL_PRIMARY_COLOR: + return TextureSrc::PrimaryColor; + case GL_TEXTURE: + return TextureSrc::Texture; + default: + return TextureSrc::InvalidEnum; + } +} + +GLenum ToGLenum(TextureSrc from) +{ + switch (from) + { + case TextureSrc::Constant: + return GL_CONSTANT; + case TextureSrc::Previous: + return GL_PREVIOUS; + case TextureSrc::PrimaryColor: + return GL_PRIMARY_COLOR; + case TextureSrc::Texture: + return GL_TEXTURE; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, TextureSrc value) +{ + switch (value) + { + case TextureSrc::Constant: + os << "GL_CONSTANT"; + break; + case TextureSrc::Previous: + os << "GL_PREVIOUS"; + break; + case TextureSrc::PrimaryColor: + os << "GL_PRIMARY_COLOR"; + break; + case TextureSrc::Texture: + os << "GL_TEXTURE"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +TextureTarget FromGLenum<TextureTarget>(GLenum from) +{ + switch (from) + { + case 
GL_TEXTURE_2D: + return TextureTarget::_2D; + case GL_TEXTURE_2D_ARRAY: + return TextureTarget::_2DArray; + case GL_TEXTURE_2D_MULTISAMPLE: + return TextureTarget::_2DMultisample; + case GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES: + return TextureTarget::_2DMultisampleArray; + case GL_TEXTURE_3D: + return TextureTarget::_3D; + case GL_TEXTURE_EXTERNAL_OES: + return TextureTarget::External; + case GL_TEXTURE_RECTANGLE_ANGLE: + return TextureTarget::Rectangle; + case GL_TEXTURE_CUBE_MAP_POSITIVE_X: + return TextureTarget::CubeMapPositiveX; + case GL_TEXTURE_CUBE_MAP_NEGATIVE_X: + return TextureTarget::CubeMapNegativeX; + case GL_TEXTURE_CUBE_MAP_POSITIVE_Y: + return TextureTarget::CubeMapPositiveY; + case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y: + return TextureTarget::CubeMapNegativeY; + case GL_TEXTURE_CUBE_MAP_POSITIVE_Z: + return TextureTarget::CubeMapPositiveZ; + case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z: + return TextureTarget::CubeMapNegativeZ; + default: + return TextureTarget::InvalidEnum; + } +} + +GLenum ToGLenum(TextureTarget from) +{ + switch (from) + { + case TextureTarget::_2D: + return GL_TEXTURE_2D; + case TextureTarget::_2DArray: + return GL_TEXTURE_2D_ARRAY; + case TextureTarget::_2DMultisample: + return GL_TEXTURE_2D_MULTISAMPLE; + case TextureTarget::_2DMultisampleArray: + return GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES; + case TextureTarget::_3D: + return GL_TEXTURE_3D; + case TextureTarget::External: + return GL_TEXTURE_EXTERNAL_OES; + case TextureTarget::Rectangle: + return GL_TEXTURE_RECTANGLE_ANGLE; + case TextureTarget::CubeMapPositiveX: + return GL_TEXTURE_CUBE_MAP_POSITIVE_X; + case TextureTarget::CubeMapNegativeX: + return GL_TEXTURE_CUBE_MAP_NEGATIVE_X; + case TextureTarget::CubeMapPositiveY: + return GL_TEXTURE_CUBE_MAP_POSITIVE_Y; + case TextureTarget::CubeMapNegativeY: + return GL_TEXTURE_CUBE_MAP_NEGATIVE_Y; + case TextureTarget::CubeMapPositiveZ: + return GL_TEXTURE_CUBE_MAP_POSITIVE_Z; + case TextureTarget::CubeMapNegativeZ: + return GL_TEXTURE_CUBE_MAP_NEGATIVE_Z; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, TextureTarget value) +{ + switch (value) + { + case TextureTarget::_2D: + os << "GL_TEXTURE_2D"; + break; + case TextureTarget::_2DArray: + os << "GL_TEXTURE_2D_ARRAY"; + break; + case TextureTarget::_2DMultisample: + os << "GL_TEXTURE_2D_MULTISAMPLE"; + break; + case TextureTarget::_2DMultisampleArray: + os << "GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES"; + break; + case TextureTarget::_3D: + os << "GL_TEXTURE_3D"; + break; + case TextureTarget::External: + os << "GL_TEXTURE_EXTERNAL_OES"; + break; + case TextureTarget::Rectangle: + os << "GL_TEXTURE_RECTANGLE_ANGLE"; + break; + case TextureTarget::CubeMapPositiveX: + os << "GL_TEXTURE_CUBE_MAP_POSITIVE_X"; + break; + case TextureTarget::CubeMapNegativeX: + os << "GL_TEXTURE_CUBE_MAP_NEGATIVE_X"; + break; + case TextureTarget::CubeMapPositiveY: + os << "GL_TEXTURE_CUBE_MAP_POSITIVE_Y"; + break; + case TextureTarget::CubeMapNegativeY: + os << "GL_TEXTURE_CUBE_MAP_NEGATIVE_Y"; + break; + case TextureTarget::CubeMapPositiveZ: + os << "GL_TEXTURE_CUBE_MAP_POSITIVE_Z"; + break; + case TextureTarget::CubeMapNegativeZ: + os << "GL_TEXTURE_CUBE_MAP_NEGATIVE_Z"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +TextureType FromGLenum<TextureType>(GLenum from) +{ + switch (from) + { + case GL_TEXTURE_2D: + return TextureType::_2D; + case GL_TEXTURE_2D_ARRAY: + return TextureType::_2DArray; + case GL_TEXTURE_2D_MULTISAMPLE: + return 
TextureType::_2DMultisample; + case GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES: + return TextureType::_2DMultisampleArray; + case GL_TEXTURE_3D: + return TextureType::_3D; + case GL_TEXTURE_EXTERNAL_OES: + return TextureType::External; + case GL_TEXTURE_RECTANGLE_ANGLE: + return TextureType::Rectangle; + case GL_TEXTURE_CUBE_MAP: + return TextureType::CubeMap; + default: + return TextureType::InvalidEnum; + } +} + +GLenum ToGLenum(TextureType from) +{ + switch (from) + { + case TextureType::_2D: + return GL_TEXTURE_2D; + case TextureType::_2DArray: + return GL_TEXTURE_2D_ARRAY; + case TextureType::_2DMultisample: + return GL_TEXTURE_2D_MULTISAMPLE; + case TextureType::_2DMultisampleArray: + return GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES; + case TextureType::_3D: + return GL_TEXTURE_3D; + case TextureType::External: + return GL_TEXTURE_EXTERNAL_OES; + case TextureType::Rectangle: + return GL_TEXTURE_RECTANGLE_ANGLE; + case TextureType::CubeMap: + return GL_TEXTURE_CUBE_MAP; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, TextureType value) +{ + switch (value) + { + case TextureType::_2D: + os << "GL_TEXTURE_2D"; + break; + case TextureType::_2DArray: + os << "GL_TEXTURE_2D_ARRAY"; + break; + case TextureType::_2DMultisample: + os << "GL_TEXTURE_2D_MULTISAMPLE"; + break; + case TextureType::_2DMultisampleArray: + os << "GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES"; + break; + case TextureType::_3D: + os << "GL_TEXTURE_3D"; + break; + case TextureType::External: + os << "GL_TEXTURE_EXTERNAL_OES"; + break; + case TextureType::Rectangle: + os << "GL_TEXTURE_RECTANGLE_ANGLE"; + break; + case TextureType::CubeMap: + os << "GL_TEXTURE_CUBE_MAP"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +VertexArrayType FromGLenum<VertexArrayType>(GLenum from) +{ + switch (from) + { + case GL_COLOR_ARRAY: + return VertexArrayType::Color; + case GL_NORMAL_ARRAY: + return VertexArrayType::Normal; + case GL_POINT_SIZE_ARRAY_OES: + return VertexArrayType::PointSize; + case GL_TEXTURE_COORD_ARRAY: + return VertexArrayType::TextureCoord; + case GL_VERTEX_ARRAY: + return VertexArrayType::Vertex; + default: + return VertexArrayType::InvalidEnum; + } +} + +GLenum ToGLenum(VertexArrayType from) +{ + switch (from) + { + case VertexArrayType::Color: + return GL_COLOR_ARRAY; + case VertexArrayType::Normal: + return GL_NORMAL_ARRAY; + case VertexArrayType::PointSize: + return GL_POINT_SIZE_ARRAY_OES; + case VertexArrayType::TextureCoord: + return GL_TEXTURE_COORD_ARRAY; + case VertexArrayType::Vertex: + return GL_VERTEX_ARRAY; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, VertexArrayType value) +{ + switch (value) + { + case VertexArrayType::Color: + os << "GL_COLOR_ARRAY"; + break; + case VertexArrayType::Normal: + os << "GL_NORMAL_ARRAY"; + break; + case VertexArrayType::PointSize: + os << "GL_POINT_SIZE_ARRAY_OES"; + break; + case VertexArrayType::TextureCoord: + os << "GL_TEXTURE_COORD_ARRAY"; + break; + case VertexArrayType::Vertex: + os << "GL_VERTEX_ARRAY"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +template <> +WrapMode FromGLenum<WrapMode>(GLenum from) +{ + switch (from) + { + case GL_CLAMP_TO_EDGE: + return WrapMode::ClampToEdge; + case GL_CLAMP_TO_BORDER: + return WrapMode::ClampToBorder; + case GL_MIRRORED_REPEAT: + return WrapMode::MirroredRepeat; + case GL_REPEAT: + return WrapMode::Repeat; + default: + return WrapMode::InvalidEnum; + } +} + +GLenum 
ToGLenum(WrapMode from) +{ + switch (from) + { + case WrapMode::ClampToEdge: + return GL_CLAMP_TO_EDGE; + case WrapMode::ClampToBorder: + return GL_CLAMP_TO_BORDER; + case WrapMode::MirroredRepeat: + return GL_MIRRORED_REPEAT; + case WrapMode::Repeat: + return GL_REPEAT; + default: + UNREACHABLE(); + return 0; + } +} + +std::ostream &operator<<(std::ostream &os, WrapMode value) +{ + switch (value) + { + case WrapMode::ClampToEdge: + os << "GL_CLAMP_TO_EDGE"; + break; + case WrapMode::ClampToBorder: + os << "GL_CLAMP_TO_BORDER"; + break; + case WrapMode::MirroredRepeat: + os << "GL_MIRRORED_REPEAT"; + break; + case WrapMode::Repeat: + os << "GL_REPEAT"; + break; + default: + os << "GL_INVALID_ENUM"; + break; + } + return os; +} + +} // namespace gl diff --git a/gfx/angle/checkout/src/common/PackedGLEnums_autogen.h b/gfx/angle/checkout/src/common/PackedGLEnums_autogen.h new file mode 100644 index 0000000000..7b6ae2fe54 --- /dev/null +++ b/gfx/angle/checkout/src/common/PackedGLEnums_autogen.h @@ -0,0 +1,581 @@ +// GENERATED FILE - DO NOT EDIT. +// Generated by gen_packed_gl_enums.py using data from packed_gl_enums.json. +// +// Copyright 2019 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// PackedGLEnums_autogen.h: +// Declares ANGLE-specific enums classes for GLenums and functions operating +// on them. + +#ifndef COMMON_PACKEDGLENUMS_AUTOGEN_H_ +#define COMMON_PACKEDGLENUMS_AUTOGEN_H_ + +#include <EGL/egl.h> +#include <EGL/eglext.h> +#include <angle_gl.h> + +#include <cstdint> +#include <ostream> + +namespace gl +{ + +template <typename Enum> +Enum FromGLenum(GLenum from); + +enum class AlphaTestFunc : uint8_t +{ + AlwaysPass = 0, + Equal = 1, + Gequal = 2, + Greater = 3, + Lequal = 4, + Less = 5, + Never = 6, + NotEqual = 7, + + InvalidEnum = 8, + EnumCount = 8, +}; + +template <> +AlphaTestFunc FromGLenum<AlphaTestFunc>(GLenum from); +GLenum ToGLenum(AlphaTestFunc from); +std::ostream &operator<<(std::ostream &os, AlphaTestFunc value); + +enum class BufferBinding : uint8_t +{ + Array = 0, + AtomicCounter = 1, + CopyRead = 2, + CopyWrite = 3, + DispatchIndirect = 4, + DrawIndirect = 5, + ElementArray = 6, + PixelPack = 7, + PixelUnpack = 8, + ShaderStorage = 9, + TransformFeedback = 10, + Uniform = 11, + + InvalidEnum = 12, + EnumCount = 12, +}; + +template <> +BufferBinding FromGLenum<BufferBinding>(GLenum from); +GLenum ToGLenum(BufferBinding from); +std::ostream &operator<<(std::ostream &os, BufferBinding value); + +enum class BufferUsage : uint8_t +{ + DynamicCopy = 0, + DynamicDraw = 1, + DynamicRead = 2, + StaticCopy = 3, + StaticDraw = 4, + StaticRead = 5, + StreamCopy = 6, + StreamDraw = 7, + StreamRead = 8, + + InvalidEnum = 9, + EnumCount = 9, +}; + +template <> +BufferUsage FromGLenum<BufferUsage>(GLenum from); +GLenum ToGLenum(BufferUsage from); +std::ostream &operator<<(std::ostream &os, BufferUsage value); + +enum class ClientVertexArrayType : uint8_t +{ + Color = 0, + Normal = 1, + PointSize = 2, + TextureCoord = 3, + Vertex = 4, + + InvalidEnum = 5, + EnumCount = 5, +}; + +template <> +ClientVertexArrayType FromGLenum<ClientVertexArrayType>(GLenum from); +GLenum ToGLenum(ClientVertexArrayType from); +std::ostream &operator<<(std::ostream &os, ClientVertexArrayType value); + +enum class CullFaceMode : uint8_t +{ + Back = 0, + Front = 1, + FrontAndBack = 2, + + InvalidEnum = 3, + EnumCount = 3, +}; + +template <> +CullFaceMode FromGLenum<CullFaceMode>(GLenum from); 
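+// Illustrative round trip through the generated helpers (example only, not
+// part of the generated file; the values follow the same pattern used for
+// every enum class declared here):
+//   gl::CullFaceMode mode = gl::FromGLenum<gl::CullFaceMode>(GL_BACK);
+//   GLenum raw            = gl::ToGLenum(mode);  // GL_BACK again
+//   std::cout << mode;                           // streams "GL_BACK"
+// Unrecognized GLenums map to the InvalidEnum member instead of asserting.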
+GLenum ToGLenum(CullFaceMode from); +std::ostream &operator<<(std::ostream &os, CullFaceMode value); + +enum class FilterMode : uint8_t +{ + Nearest = 0, + Linear = 1, + NearestMipmapNearest = 2, + NearestMipmapLinear = 3, + LinearMipmapLinear = 4, + + InvalidEnum = 5, + EnumCount = 5, +}; + +template <> +FilterMode FromGLenum<FilterMode>(GLenum from); +GLenum ToGLenum(FilterMode from); +std::ostream &operator<<(std::ostream &os, FilterMode value); + +enum class FogMode : uint8_t +{ + Exp = 0, + Exp2 = 1, + Linear = 2, + + InvalidEnum = 3, + EnumCount = 3, +}; + +template <> +FogMode FromGLenum<FogMode>(GLenum from); +GLenum ToGLenum(FogMode from); +std::ostream &operator<<(std::ostream &os, FogMode value); + +enum class GraphicsResetStatus : uint8_t +{ + NoError = 0, + GuiltyContextReset = 1, + InnocentContextReset = 2, + UnknownContextReset = 3, + + InvalidEnum = 4, + EnumCount = 4, +}; + +template <> +GraphicsResetStatus FromGLenum<GraphicsResetStatus>(GLenum from); +GLenum ToGLenum(GraphicsResetStatus from); +std::ostream &operator<<(std::ostream &os, GraphicsResetStatus value); + +enum class HandleType : uint8_t +{ + OpaqueFd = 0, + + InvalidEnum = 1, + EnumCount = 1, +}; + +template <> +HandleType FromGLenum<HandleType>(GLenum from); +GLenum ToGLenum(HandleType from); +std::ostream &operator<<(std::ostream &os, HandleType value); + +enum class HintSetting : uint8_t +{ + DontCare = 0, + Fastest = 1, + Nicest = 2, + + InvalidEnum = 3, + EnumCount = 3, +}; + +template <> +HintSetting FromGLenum<HintSetting>(GLenum from); +GLenum ToGLenum(HintSetting from); +std::ostream &operator<<(std::ostream &os, HintSetting value); + +enum class ImageLayout : uint8_t +{ + Undefined = 0, + General = 1, + ColorAttachment = 2, + DepthStencilAttachment = 3, + DepthStencilReadOnlyAttachment = 4, + ShaderReadOnly = 5, + TransferSrc = 6, + TransferDst = 7, + DepthReadOnlyStencilAttachment = 8, + DepthAttachmentStencilReadOnly = 9, + + InvalidEnum = 10, + EnumCount = 10, +}; + +template <> +ImageLayout FromGLenum<ImageLayout>(GLenum from); +GLenum ToGLenum(ImageLayout from); +std::ostream &operator<<(std::ostream &os, ImageLayout value); + +enum class LightParameter : uint8_t +{ + Ambient = 0, + AmbientAndDiffuse = 1, + ConstantAttenuation = 2, + Diffuse = 3, + LinearAttenuation = 4, + Position = 5, + QuadraticAttenuation = 6, + Specular = 7, + SpotCutoff = 8, + SpotDirection = 9, + SpotExponent = 10, + + InvalidEnum = 11, + EnumCount = 11, +}; + +template <> +LightParameter FromGLenum<LightParameter>(GLenum from); +GLenum ToGLenum(LightParameter from); +std::ostream &operator<<(std::ostream &os, LightParameter value); + +enum class LogicalOperation : uint8_t +{ + And = 0, + AndInverted = 1, + AndReverse = 2, + Clear = 3, + Copy = 4, + CopyInverted = 5, + Equiv = 6, + Invert = 7, + Nand = 8, + Noop = 9, + Nor = 10, + Or = 11, + OrInverted = 12, + OrReverse = 13, + Set = 14, + Xor = 15, + + InvalidEnum = 16, + EnumCount = 16, +}; + +template <> +LogicalOperation FromGLenum<LogicalOperation>(GLenum from); +GLenum ToGLenum(LogicalOperation from); +std::ostream &operator<<(std::ostream &os, LogicalOperation value); + +enum class MaterialParameter : uint8_t +{ + Ambient = 0, + AmbientAndDiffuse = 1, + Diffuse = 2, + Emission = 3, + Shininess = 4, + Specular = 5, + + InvalidEnum = 6, + EnumCount = 6, +}; + +template <> +MaterialParameter FromGLenum<MaterialParameter>(GLenum from); +GLenum ToGLenum(MaterialParameter from); +std::ostream &operator<<(std::ostream &os, MaterialParameter value); + +enum class 
MatrixType : uint8_t +{ + Modelview = 0, + Projection = 1, + Texture = 2, + + InvalidEnum = 3, + EnumCount = 3, +}; + +template <> +MatrixType FromGLenum<MatrixType>(GLenum from); +GLenum ToGLenum(MatrixType from); +std::ostream &operator<<(std::ostream &os, MatrixType value); + +enum class PointParameter : uint8_t +{ + PointSizeMin = 0, + PointSizeMax = 1, + PointFadeThresholdSize = 2, + PointDistanceAttenuation = 3, + + InvalidEnum = 4, + EnumCount = 4, +}; + +template <> +PointParameter FromGLenum<PointParameter>(GLenum from); +GLenum ToGLenum(PointParameter from); +std::ostream &operator<<(std::ostream &os, PointParameter value); + +enum class ProvokingVertexConvention : uint8_t +{ + FirstVertexConvention = 0, + LastVertexConvention = 1, + + InvalidEnum = 2, + EnumCount = 2, +}; + +template <> +ProvokingVertexConvention FromGLenum<ProvokingVertexConvention>(GLenum from); +GLenum ToGLenum(ProvokingVertexConvention from); +std::ostream &operator<<(std::ostream &os, ProvokingVertexConvention value); + +enum class QueryType : uint8_t +{ + AnySamples = 0, + AnySamplesConservative = 1, + CommandsCompleted = 2, + PrimitivesGenerated = 3, + TimeElapsed = 4, + Timestamp = 5, + TransformFeedbackPrimitivesWritten = 6, + + InvalidEnum = 7, + EnumCount = 7, +}; + +template <> +QueryType FromGLenum<QueryType>(GLenum from); +GLenum ToGLenum(QueryType from); +std::ostream &operator<<(std::ostream &os, QueryType value); + +enum class ShaderType : uint8_t +{ + Vertex = 0, + Fragment = 1, + Geometry = 2, + Compute = 3, + + InvalidEnum = 4, + EnumCount = 4, +}; + +template <> +ShaderType FromGLenum<ShaderType>(GLenum from); +GLenum ToGLenum(ShaderType from); +std::ostream &operator<<(std::ostream &os, ShaderType value); + +enum class ShadingModel : uint8_t +{ + Flat = 0, + Smooth = 1, + + InvalidEnum = 2, + EnumCount = 2, +}; + +template <> +ShadingModel FromGLenum<ShadingModel>(GLenum from); +GLenum ToGLenum(ShadingModel from); +std::ostream &operator<<(std::ostream &os, ShadingModel value); + +enum class TextureCombine : uint8_t +{ + Add = 0, + AddSigned = 1, + Dot3Rgb = 2, + Dot3Rgba = 3, + Interpolate = 4, + Modulate = 5, + Replace = 6, + Subtract = 7, + + InvalidEnum = 8, + EnumCount = 8, +}; + +template <> +TextureCombine FromGLenum<TextureCombine>(GLenum from); +GLenum ToGLenum(TextureCombine from); +std::ostream &operator<<(std::ostream &os, TextureCombine value); + +enum class TextureEnvMode : uint8_t +{ + Add = 0, + Blend = 1, + Combine = 2, + Decal = 3, + Modulate = 4, + Replace = 5, + + InvalidEnum = 6, + EnumCount = 6, +}; + +template <> +TextureEnvMode FromGLenum<TextureEnvMode>(GLenum from); +GLenum ToGLenum(TextureEnvMode from); +std::ostream &operator<<(std::ostream &os, TextureEnvMode value); + +enum class TextureEnvParameter : uint8_t +{ + Mode = 0, + Color = 1, + CombineRgb = 2, + CombineAlpha = 3, + RgbScale = 4, + AlphaScale = 5, + Src0Rgb = 6, + Src1Rgb = 7, + Src2Rgb = 8, + Src0Alpha = 9, + Src1Alpha = 10, + Src2Alpha = 11, + Op0Rgb = 12, + Op1Rgb = 13, + Op2Rgb = 14, + Op0Alpha = 15, + Op1Alpha = 16, + Op2Alpha = 17, + PointCoordReplace = 18, + + InvalidEnum = 19, + EnumCount = 19, +}; + +template <> +TextureEnvParameter FromGLenum<TextureEnvParameter>(GLenum from); +GLenum ToGLenum(TextureEnvParameter from); +std::ostream &operator<<(std::ostream &os, TextureEnvParameter value); + +enum class TextureEnvTarget : uint8_t +{ + Env = 0, + PointSprite = 1, + + InvalidEnum = 2, + EnumCount = 2, +}; + +template <> +TextureEnvTarget FromGLenum<TextureEnvTarget>(GLenum from); +GLenum 
ToGLenum(TextureEnvTarget from); +std::ostream &operator<<(std::ostream &os, TextureEnvTarget value); + +enum class TextureOp : uint8_t +{ + OneMinusSrcAlpha = 0, + OneMinusSrcColor = 1, + SrcAlpha = 2, + SrcColor = 3, + + InvalidEnum = 4, + EnumCount = 4, +}; + +template <> +TextureOp FromGLenum<TextureOp>(GLenum from); +GLenum ToGLenum(TextureOp from); +std::ostream &operator<<(std::ostream &os, TextureOp value); + +enum class TextureSrc : uint8_t +{ + Constant = 0, + Previous = 1, + PrimaryColor = 2, + Texture = 3, + + InvalidEnum = 4, + EnumCount = 4, +}; + +template <> +TextureSrc FromGLenum<TextureSrc>(GLenum from); +GLenum ToGLenum(TextureSrc from); +std::ostream &operator<<(std::ostream &os, TextureSrc value); + +enum class TextureTarget : uint8_t +{ + _2D = 0, + _2DArray = 1, + _2DMultisample = 2, + _2DMultisampleArray = 3, + _3D = 4, + External = 5, + Rectangle = 6, + CubeMapPositiveX = 7, + CubeMapNegativeX = 8, + CubeMapPositiveY = 9, + CubeMapNegativeY = 10, + CubeMapPositiveZ = 11, + CubeMapNegativeZ = 12, + + InvalidEnum = 13, + EnumCount = 13, +}; + +template <> +TextureTarget FromGLenum<TextureTarget>(GLenum from); +GLenum ToGLenum(TextureTarget from); +std::ostream &operator<<(std::ostream &os, TextureTarget value); + +enum class TextureType : uint8_t +{ + _2D = 0, + _2DArray = 1, + _2DMultisample = 2, + _2DMultisampleArray = 3, + _3D = 4, + External = 5, + Rectangle = 6, + CubeMap = 7, + + InvalidEnum = 8, + EnumCount = 8, +}; + +template <> +TextureType FromGLenum<TextureType>(GLenum from); +GLenum ToGLenum(TextureType from); +std::ostream &operator<<(std::ostream &os, TextureType value); + +enum class VertexArrayType : uint8_t +{ + Color = 0, + Normal = 1, + PointSize = 2, + TextureCoord = 3, + Vertex = 4, + + InvalidEnum = 5, + EnumCount = 5, +}; + +template <> +VertexArrayType FromGLenum<VertexArrayType>(GLenum from); +GLenum ToGLenum(VertexArrayType from); +std::ostream &operator<<(std::ostream &os, VertexArrayType value); + +enum class WrapMode : uint8_t +{ + ClampToEdge = 0, + ClampToBorder = 1, + MirroredRepeat = 2, + Repeat = 3, + + InvalidEnum = 4, + EnumCount = 4, +}; + +template <> +WrapMode FromGLenum<WrapMode>(GLenum from); +GLenum ToGLenum(WrapMode from); +std::ostream &operator<<(std::ostream &os, WrapMode value); + +} // namespace gl + +#endif // COMMON_PACKEDGLENUMS_AUTOGEN_H_ diff --git a/gfx/angle/checkout/src/common/PoolAlloc.cpp b/gfx/angle/checkout/src/common/PoolAlloc.cpp new file mode 100644 index 0000000000..b6e3702f8f --- /dev/null +++ b/gfx/angle/checkout/src/common/PoolAlloc.cpp @@ -0,0 +1,334 @@ +// +// Copyright 2019 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// PoolAlloc.cpp: +// Implements the class methods for PoolAllocator and Allocation classes. +// + +#include "common/PoolAlloc.h" + +#include <assert.h> +#include <stdint.h> +#include <stdio.h> + +#include "common/angleutils.h" +#include "common/debug.h" +#include "common/mathutil.h" +#include "common/platform.h" +#include "common/tls.h" + +namespace angle +{ + +// +// Implement the functionality of the PoolAllocator class, which +// is documented in PoolAlloc.h. 
+// +PoolAllocator::PoolAllocator(int growthIncrement, int allocationAlignment) + : mAlignment(allocationAlignment), +#if !defined(ANGLE_DISABLE_POOL_ALLOC) + mPageSize(growthIncrement), + mFreeList(0), + mInUseList(0), + mNumCalls(0), + mTotalBytes(0), +#endif + mLocked(false) +{ +#if !defined(ANGLE_DISABLE_POOL_ALLOC) + if (mAlignment == 1) + { + // This is a special fast-path where fastAllocation() is enabled + mAlignmentMask = 0; + mHeaderSkip = sizeof(Header); + } + else + { +#endif + // + // Adjust mAlignment to be at least pointer aligned and + // power of 2. + // + size_t minAlign = sizeof(void *); + mAlignment &= ~(minAlign - 1); + if (mAlignment < minAlign) + mAlignment = minAlign; + mAlignment = gl::ceilPow2(mAlignment); + mAlignmentMask = mAlignment - 1; + +#if !defined(ANGLE_DISABLE_POOL_ALLOC) + // + // Align header skip + // + mHeaderSkip = minAlign; + if (mHeaderSkip < sizeof(Header)) + { + mHeaderSkip = rx::roundUp(sizeof(Header), mAlignment); + } + } + // + // Don't allow page sizes we know are smaller than all common + // OS page sizes. + // + if (mPageSize < 4 * 1024) + mPageSize = 4 * 1024; + // + // A large mCurrentPageOffset indicates a new page needs to + // be obtained to allocate memory. + // + mCurrentPageOffset = mPageSize; +#else // !defined(ANGLE_DISABLE_POOL_ALLOC) + mStack.push_back({}); +#endif +} + +PoolAllocator::~PoolAllocator() +{ +#if !defined(ANGLE_DISABLE_POOL_ALLOC) + while (mInUseList) + { + Header *next = mInUseList->nextPage; + mInUseList->~Header(); + delete[] reinterpret_cast<char *>(mInUseList); + mInUseList = next; + } + // We should not check the guard blocks + // here, because we did it already when the block was + // placed into the free list. + // + while (mFreeList) + { + Header *next = mFreeList->nextPage; + delete[] reinterpret_cast<char *>(mFreeList); + mFreeList = next; + } +#else // !defined(ANGLE_DISABLE_POOL_ALLOC) + for (auto &allocs : mStack) + { + for (auto alloc : allocs) + { + free(alloc); + } + } + mStack.clear(); +#endif +} + +// +// Check a single guard block for damage +// +void Allocation::checkGuardBlock(unsigned char *blockMem, + unsigned char val, + const char *locText) const +{ +#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS) + for (size_t x = 0; x < kGuardBlockSize; x++) + { + if (blockMem[x] != val) + { + char assertMsg[80]; + // We don't print the assert message. It's here just to be helpful. + snprintf(assertMsg, sizeof(assertMsg), + "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, mSize, data()); + assert(0 && "PoolAlloc: Damage in guard block"); + } + } +#endif +} + +void PoolAllocator::push() +{ +#if !defined(ANGLE_DISABLE_POOL_ALLOC) + AllocState state = {mCurrentPageOffset, mInUseList}; + + mStack.push_back(state); + + // + // Indicate there is no current page to allocate from. + // + mCurrentPageOffset = mPageSize; +#else // !defined(ANGLE_DISABLE_POOL_ALLOC) + mStack.push_back({}); +#endif +} + +// +// Do a mass-deallocation of all the individual allocations +// that have occurred since the last push(), or since the +// last pop(), or since the object's creation. +// +// The deallocated pages are saved for future allocations. 
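+// A typical pairing of push()/pop() looks like this (illustrative sketch,
+// not code from the upstream file):
+//   PoolAllocator pool;
+//   pool.push();
+//   void *a = pool.allocate(64);
+//   void *b = pool.allocate(128);
+//   pool.pop();  // releases a and b together; the pages are kept for reuse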
+// +void PoolAllocator::pop() +{ + if (mStack.size() < 1) + return; + +#if !defined(ANGLE_DISABLE_POOL_ALLOC) + Header *page = mStack.back().page; + mCurrentPageOffset = mStack.back().offset; + + while (mInUseList != page) + { + // invoke destructor to free allocation list + mInUseList->~Header(); + + Header *nextInUse = mInUseList->nextPage; + if (mInUseList->pageCount > 1) + delete[] reinterpret_cast<char *>(mInUseList); + else + { + mInUseList->nextPage = mFreeList; + mFreeList = mInUseList; + } + mInUseList = nextInUse; + } + + mStack.pop_back(); +#else // !defined(ANGLE_DISABLE_POOL_ALLOC) + for (auto &alloc : mStack.back()) + { + free(alloc); + } + mStack.pop_back(); +#endif +} + +// +// Do a mass-deallocation of all the individual allocations +// that have occurred. +// +void PoolAllocator::popAll() +{ + while (mStack.size() > 0) + pop(); +} + +void *PoolAllocator::allocate(size_t numBytes) +{ + ASSERT(!mLocked); + +#if !defined(ANGLE_DISABLE_POOL_ALLOC) + // + // Just keep some interesting statistics. + // + ++mNumCalls; + mTotalBytes += numBytes; + + // If we are using guard blocks, all allocations are bracketed by + // them: [guardblock][allocation][guardblock]. numBytes is how + // much memory the caller asked for. allocationSize is the total + // size including guard blocks. In release build, + // kGuardBlockSize=0 and this all gets optimized away. + size_t allocationSize = Allocation::AllocationSize(numBytes) + mAlignment; + // Detect integer overflow. + if (allocationSize < numBytes) + return 0; + + // + // Do the allocation, most likely case first, for efficiency. + // This step could be moved to be inline sometime. + // + if (allocationSize <= mPageSize - mCurrentPageOffset) + { + // + // Safe to allocate from mCurrentPageOffset. + // + unsigned char *memory = reinterpret_cast<unsigned char *>(mInUseList) + mCurrentPageOffset; + mCurrentPageOffset += allocationSize; + mCurrentPageOffset = (mCurrentPageOffset + mAlignmentMask) & ~mAlignmentMask; + + return initializeAllocation(mInUseList, memory, numBytes); + } + + if (allocationSize > mPageSize - mHeaderSkip) + { + // + // Do a multi-page allocation. Don't mix these with the others. + // The OS is efficient in allocating and freeing multiple pages. + // + size_t numBytesToAlloc = allocationSize + mHeaderSkip; + // Detect integer overflow. 
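+ // (With unsigned arithmetic the sum can only be smaller than one of its
+ // operands if the addition wrapped around, hence the comparison below.)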
+ if (numBytesToAlloc < allocationSize) + return 0; + + Header *memory = reinterpret_cast<Header *>(::new char[numBytesToAlloc]); + if (memory == 0) + return 0; + + // Use placement-new to initialize header + new (memory) Header(mInUseList, (numBytesToAlloc + mPageSize - 1) / mPageSize); + mInUseList = memory; + + mCurrentPageOffset = mPageSize; // make next allocation come from a new page + + // No guard blocks for multi-page allocations (yet) + void *unalignedPtr = + reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memory) + mHeaderSkip); + return std::align(mAlignment, numBytes, unalignedPtr, allocationSize); + } + unsigned char *newPageAddr = + static_cast<unsigned char *>(allocateNewPage(numBytes, allocationSize)); + return initializeAllocation(mInUseList, newPageAddr, numBytes); +#else // !defined(ANGLE_DISABLE_POOL_ALLOC) + void *alloc = malloc(numBytes + mAlignmentMask); + mStack.back().push_back(alloc); + + intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc); + intAlloc = (intAlloc + mAlignmentMask) & ~mAlignmentMask; + return reinterpret_cast<void *>(intAlloc); +#endif +} + +#if !defined(ANGLE_DISABLE_POOL_ALLOC) +void *PoolAllocator::allocateNewPage(size_t numBytes, size_t allocationSize) +{ + // + // Need a simple page to allocate from. + // + Header *memory; + if (mFreeList) + { + memory = mFreeList; + mFreeList = mFreeList->nextPage; + } + else + { + memory = reinterpret_cast<Header *>(::new char[mPageSize]); + if (memory == 0) + return 0; + } + // Use placement-new to initialize header + new (memory) Header(mInUseList, 1); + mInUseList = memory; + + unsigned char *ret = reinterpret_cast<unsigned char *>(mInUseList) + mHeaderSkip; + mCurrentPageOffset = (mHeaderSkip + allocationSize + mAlignmentMask) & ~mAlignmentMask; + return ret; +} +#endif + +void PoolAllocator::lock() +{ + ASSERT(!mLocked); + mLocked = true; +} + +void PoolAllocator::unlock() +{ + ASSERT(mLocked); + mLocked = false; +} + +// +// Check all allocations in a list for damage by calling check on each. +// +void Allocation::checkAllocList() const +{ + for (const Allocation *alloc = this; alloc != 0; alloc = alloc->mPrevAlloc) + alloc->check(); +} + +} // namespace angle
\ No newline at end of file diff --git a/gfx/angle/checkout/src/common/PoolAlloc.h b/gfx/angle/checkout/src/common/PoolAlloc.h new file mode 100644 index 0000000000..1c902bfdd7 --- /dev/null +++ b/gfx/angle/checkout/src/common/PoolAlloc.h @@ -0,0 +1,276 @@ +// +// Copyright 2019 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// PoolAlloc.h: +// Defines the class interface for PoolAllocator and the Allocation +// class that it uses internally. +// + +#ifndef COMMON_POOLALLOC_H_ +#define COMMON_POOLALLOC_H_ + +#if !defined(NDEBUG) +# define ANGLE_POOL_ALLOC_GUARD_BLOCKS // define to enable guard block sanity checking +#endif + +// +// This header defines an allocator that can be used to efficiently +// allocate a large number of small requests for heap memory, with the +// intention that they are not individually deallocated, but rather +// collectively deallocated at one time. +// +// This simultaneously +// +// * Makes each individual allocation much more efficient; the +// typical allocation is trivial. +// * Completely avoids the cost of doing individual deallocation. +// * Saves the trouble of tracking down and plugging a large class of leaks. +// +// Individual classes can use this allocator by supplying their own +// new and delete methods. +// + +#include <stddef.h> +#include <string.h> +#include <memory> +#include <vector> + +#include "angleutils.h" +#include "common/debug.h" + +namespace angle +{ +// If we are using guard blocks, we must track each individual +// allocation. If we aren't using guard blocks, these +// never get instantiated, so won't have any impact. +// + +class Allocation +{ + public: + Allocation(size_t size, unsigned char *mem, Allocation *prev = 0) + : mSize(size), mMem(mem), mPrevAlloc(prev) + { +// Allocations are bracketed: +// [allocationHeader][initialGuardBlock][userData][finalGuardBlock] +// This would be cleaner with if (kGuardBlockSize)..., but that +// makes the compiler print warnings about 0 length memsets, +// even with the if() protecting them. +#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS) + memset(preGuard(), kGuardBlockBeginVal, kGuardBlockSize); + memset(data(), kUserDataFill, mSize); + memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize); +#endif + } + + void check() const + { + checkGuardBlock(preGuard(), kGuardBlockBeginVal, "before"); + checkGuardBlock(postGuard(), kGuardBlockEndVal, "after"); + } + + void checkAllocList() const; + + // Return total size needed to accommodate user buffer of 'size', + // plus our tracking data. + static size_t AllocationSize(size_t size) { return size + 2 * kGuardBlockSize + HeaderSize(); } + + // Offset from surrounding buffer to get to user data buffer. 
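+ // (In release builds kGuardBlockSize and HeaderSize() are both zero, so
+ // this simply returns m and the guard-block tracking compiles away.)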
+ static unsigned char *OffsetAllocation(unsigned char *m) + { + return m + kGuardBlockSize + HeaderSize(); + } + + private: + void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const; + + // Find offsets to pre and post guard blocks, and user data buffer + unsigned char *preGuard() const { return mMem + HeaderSize(); } + unsigned char *data() const { return preGuard() + kGuardBlockSize; } + unsigned char *postGuard() const { return data() + mSize; } + size_t mSize; // size of the user data area + unsigned char *mMem; // beginning of our allocation (pts to header) + Allocation *mPrevAlloc; // prior allocation in the chain + + static constexpr unsigned char kGuardBlockBeginVal = 0xfb; + static constexpr unsigned char kGuardBlockEndVal = 0xfe; + static constexpr unsigned char kUserDataFill = 0xcd; +#if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS) + static constexpr size_t kGuardBlockSize = 16; + static constexpr size_t HeaderSize() { return sizeof(Allocation); } +#else + static constexpr size_t kGuardBlockSize = 0; + static constexpr size_t HeaderSize() { return 0; } +#endif +}; + +// +// There are several stacks. One is to track the pushing and popping +// of the user, and not yet implemented. The others are simply a +// repositories of free pages or used pages. +// +// Page stacks are linked together with a simple header at the beginning +// of each allocation obtained from the underlying OS. Multi-page allocations +// are returned to the OS. Individual page allocations are kept for future +// re-use. +// +// The "page size" used is not, nor must it match, the underlying OS +// page size. But, having it be about that size or equal to a set of +// pages is likely most optimal. +// +class PoolAllocator : angle::NonCopyable +{ + public: + static const int kDefaultAlignment = 16; + // + // Create PoolAllocator. If alignment is be set to 1 byte then fastAllocate() + // function can be used to make allocations with less overhead. + // + PoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = kDefaultAlignment); + + // + // Don't call the destructor just to free up the memory, call pop() + // + ~PoolAllocator(); + + // + // Call push() to establish a new place to pop memory to. Does not + // have to be called to get things started. + // + void push(); + + // + // Call pop() to free all memory allocated since the last call to push(), + // or if no last call to push, frees all memory since first allocation. + // + void pop(); + + // + // Call popAll() to free all memory allocated. + // + void popAll(); + + // + // Call allocate() to actually acquire memory. Returns 0 if no memory + // available, otherwise a properly aligned pointer to 'numBytes' of memory. + // + void *allocate(size_t numBytes); + + // + // Call fastAllocate() for a faster allocate function that does minimal bookkeeping + // preCondition: Allocator must have been created w/ alignment of 1 + ANGLE_INLINE uint8_t *fastAllocate(size_t numBytes) + { +#if defined(ANGLE_DISABLE_POOL_ALLOC) + return reinterpret_cast<uint8_t *>(allocate(numBytes)); +#else + ASSERT(mAlignment == 1); + // No multi-page allocations + ASSERT(numBytes <= (mPageSize - mHeaderSkip)); + // + // Do the allocation, most likely case inline first, for efficiency. + // + if (numBytes <= mPageSize - mCurrentPageOffset) + { + // + // Safe to allocate from mCurrentPageOffset. 
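+ // (fastAllocate() requires mAlignment == 1, so the offset can be advanced
+ // without the rounding that the general allocate() path performs.)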
+ // + uint8_t *memory = reinterpret_cast<uint8_t *>(mInUseList) + mCurrentPageOffset; + mCurrentPageOffset += numBytes; + return memory; + } + return reinterpret_cast<uint8_t *>(allocateNewPage(numBytes, numBytes)); +#endif + } + + // + // There is no deallocate. The point of this class is that + // deallocation can be skipped by the user of it, as the model + // of use is to simultaneously deallocate everything at once + // by calling pop(), and to not have to solve memory leak problems. + // + + // Catch unwanted allocations. + // TODO(jmadill): Remove this when we remove the global allocator. + void lock(); + void unlock(); + + private: + size_t mAlignment; // all returned allocations will be aligned at + // this granularity, which will be a power of 2 + size_t mAlignmentMask; +#if !defined(ANGLE_DISABLE_POOL_ALLOC) + friend struct Header; + + struct Header + { + Header(Header *nextPage, size_t pageCount) + : nextPage(nextPage), + pageCount(pageCount) +# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS) + , + lastAllocation(0) +# endif + {} + + ~Header() + { +# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS) + if (lastAllocation) + lastAllocation->checkAllocList(); +# endif + } + + Header *nextPage; + size_t pageCount; +# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS) + Allocation *lastAllocation; +# endif + }; + + struct AllocState + { + size_t offset; + Header *page; + }; + using AllocStack = std::vector<AllocState>; + + // Slow path of allocation when we have to get a new page. + void *allocateNewPage(size_t numBytes, size_t allocationSize); + // Track allocations if and only if we're using guard blocks + void *initializeAllocation(Header *block, unsigned char *memory, size_t numBytes) + { +# if defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS) + new (memory) Allocation(numBytes + mAlignment, memory, block->lastAllocation); + block->lastAllocation = reinterpret_cast<Allocation *>(memory); +# endif + // The OffsetAllocation() call is optimized away if !defined(ANGLE_POOL_ALLOC_GUARD_BLOCKS) + void *unalignedPtr = Allocation::OffsetAllocation(memory); + size_t alignedBytes = numBytes + mAlignment; + return std::align(mAlignment, numBytes, unalignedPtr, alignedBytes); + } + + size_t mPageSize; // granularity of allocation from the OS + size_t mHeaderSkip; // amount of memory to skip to make room for the + // header (basically, size of header, rounded + // up to make it aligned + size_t mCurrentPageOffset; // next offset in top of inUseList to allocate from + Header *mFreeList; // list of popped memory + Header *mInUseList; // list of all memory currently being used + AllocStack mStack; // stack of where to allocate from, to partition pool + + int mNumCalls; // just an interesting statistic + size_t mTotalBytes; // just an interesting statistic + +#else // !defined(ANGLE_DISABLE_POOL_ALLOC) + std::vector<std::vector<void *>> mStack; +#endif + + bool mLocked; +}; + +} // namespace angle + +#endif // COMMON_POOLALLOC_H_ diff --git a/gfx/angle/checkout/src/common/aligned_memory.cpp b/gfx/angle/checkout/src/common/aligned_memory.cpp new file mode 100644 index 0000000000..70a730ef62 --- /dev/null +++ b/gfx/angle/checkout/src/common/aligned_memory.cpp @@ -0,0 +1,64 @@ +// +// Copyright 2017 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// aligned_memory: An aligned memory allocator. Based on Chrome's base/memory/aligned_memory. 
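+// Example use (illustrative only): a 64-byte block on a 32-byte boundary.
+//   void *p = angle::AlignedAlloc(64, 32);  // alignment: power of two, multiple of sizeof(void *)
+//   // ... use p ...
+//   angle::AlignedFree(p);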
+// + +#include "common/aligned_memory.h" + +#include "common/debug.h" +#include "common/platform.h" + +#if defined(COMPILER_MSVC) +# include <malloc.h> +#else +# include <stdlib.h> +#endif + +namespace angle +{ + +void *AlignedAlloc(size_t size, size_t alignment) +{ + ASSERT(size > 0); + ASSERT((alignment & (alignment - 1)) == 0); + ASSERT((alignment % sizeof(void *)) == 0); + void *ptr = nullptr; +#if defined(ANGLE_PLATFORM_WINDOWS) + ptr = _aligned_malloc(size, alignment); +// Android technically supports posix_memalign(), but does not expose it in +// the current version of the library headers used by Chrome. Luckily, +// memalign() on Android returns pointers which can safely be used with +// free(), so we can use it instead. Issue filed to document this: +// http://code.google.com/p/android/issues/detail?id=35391 +#elif defined(ANGLE_PLATFORM_ANDROID) + ptr = memalign(alignment, size); +#else + if (posix_memalign(&ptr, alignment, size)) + ptr = nullptr; +#endif + // Since aligned allocations may fail for non-memory related reasons, force a + // crash if we encounter a failed allocation. + if (!ptr) + { + ERR() << "If you crashed here, your aligned allocation is incorrect: " + << "size=" << size << ", alignment=" << alignment; + ASSERT(false); + } + // Sanity check alignment just to be safe. + ASSERT((reinterpret_cast<uintptr_t>(ptr) & (alignment - 1)) == 0); + return ptr; +} + +void AlignedFree(void *ptr) +{ +#if defined(_MSC_VER) + _aligned_free(ptr); +#else + free(ptr); +#endif +} + +} // namespace angle diff --git a/gfx/angle/checkout/src/common/aligned_memory.h b/gfx/angle/checkout/src/common/aligned_memory.h new file mode 100644 index 0000000000..dcbb60d1cb --- /dev/null +++ b/gfx/angle/checkout/src/common/aligned_memory.h @@ -0,0 +1,23 @@ +// +// Copyright 2017 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// aligned_memory: An aligned memory allocator. Based on Chrome's base/memory/aligned_memory. +// + +#ifndef COMMON_ALIGNED_MEMORY_H_ +#define COMMON_ALIGNED_MEMORY_H_ + +#include <cstddef> + +namespace angle +{ + +// This can be replaced with std::aligned_malloc when we have C++17. +void *AlignedAlloc(size_t size, size_t alignment); +void AlignedFree(void *ptr); + +} // namespace angle + +#endif // COMMON_ALIGNED_MEMORY_H_ diff --git a/gfx/angle/checkout/src/common/android_util.cpp b/gfx/angle/checkout/src/common/android_util.cpp new file mode 100644 index 0000000000..38e0d4fbe3 --- /dev/null +++ b/gfx/angle/checkout/src/common/android_util.cpp @@ -0,0 +1,268 @@ +// +// Copyright 2018 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+// + +// android_util.cpp: Utilities for the using the Android platform + +#include "common/android_util.h" + +#include <cstdint> + +// Taken from cutils/native_handle.h: +// https://android.googlesource.com/platform/system/core/+/master/libcutils/include/cutils/native_handle.h +typedef struct native_handle +{ + int version; /* sizeof(native_handle_t) */ + int numFds; /* number of file-descriptors at &data[0] */ + int numInts; /* number of ints at &data[numFds] */ + int data[0]; /* numFds + numInts ints */ +} native_handle_t; + +// Taken from nativebase/nativebase.h +// https://android.googlesource.com/platform/frameworks/native/+/master/libs/nativebase/include/nativebase/nativebase.h +typedef const native_handle_t *buffer_handle_t; + +typedef struct android_native_base_t +{ + /* a magic value defined by the actual EGL native type */ + int magic; + /* the sizeof() of the actual EGL native type */ + int version; + void *reserved[4]; + /* reference-counting interface */ + void (*incRef)(struct android_native_base_t *base); + void (*decRef)(struct android_native_base_t *base); +} android_native_base_t; + +typedef struct ANativeWindowBuffer +{ + struct android_native_base_t common; + int width; + int height; + int stride; + int format; + int usage_deprecated; + uintptr_t layerCount; + void *reserved[1]; + const native_handle_t *handle; + uint64_t usage; + // we needed extra space for storing the 64-bits usage flags + // the number of slots to use from reserved_proc depends on the + // architecture. + void *reserved_proc[8 - (sizeof(uint64_t) / sizeof(void *))]; +} ANativeWindowBuffer_t; + +// Taken from android/hardware_buffer.h +// https://android.googlesource.com/platform/frameworks/native/+/master/libs/nativewindow/include/android/hardware_buffer.h + +// AHARDWAREBUFFER_FORMAT_B8G8R8A8_UNORM AHARDWAREBUFFER_FORMAT_B4G4R4A4_UNORM, +// AHARDWAREBUFFER_FORMAT_B5G5R5A1_UNORM formats were deprecated and re-added explicitly. + +// clang-format off +/** + * Buffer pixel formats. + */ +enum { + /** + * Corresponding formats: + * Vulkan: VK_FORMAT_R8G8B8A8_UNORM + * OpenGL ES: GL_RGBA8 + */ + AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM = 1, + + /** + * 32 bits per pixel, 8 bits per channel format where alpha values are + * ignored (always opaque). + * Corresponding formats: + * Vulkan: VK_FORMAT_R8G8B8A8_UNORM + * OpenGL ES: GL_RGB8 + */ + AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM = 2, + + /** + * Corresponding formats: + * Vulkan: VK_FORMAT_R8G8B8_UNORM + * OpenGL ES: GL_RGB8 + */ + AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM = 3, + + /** + * Corresponding formats: + * Vulkan: VK_FORMAT_R5G6B5_UNORM_PACK16 + * OpenGL ES: GL_RGB565 + */ + AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM = 4, + + AHARDWAREBUFFER_FORMAT_B8G8R8A8_UNORM = 5, + AHARDWAREBUFFER_FORMAT_B5G5R5A1_UNORM = 6, + AHARDWAREBUFFER_FORMAT_B4G4R4A4_UNORM = 7, + /** + * Corresponding formats: + * Vulkan: VK_FORMAT_R16G16B16A16_SFLOAT + * OpenGL ES: GL_RGBA16F + */ + AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT = 0x16, + + /** + * Corresponding formats: + * Vulkan: VK_FORMAT_A2B10G10R10_UNORM_PACK32 + * OpenGL ES: GL_RGB10_A2 + */ + AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM = 0x2b, + + /** + * An opaque binary blob format that must have height 1, with width equal to + * the buffer size in bytes. 
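+     * It has no GL counterpart; NativePixelFormatToGLInternalFormat below maps it
+     * to GL_NONE.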
+ */ + AHARDWAREBUFFER_FORMAT_BLOB = 0x21, + + /** + * Corresponding formats: + * Vulkan: VK_FORMAT_D16_UNORM + * OpenGL ES: GL_DEPTH_COMPONENT16 + */ + AHARDWAREBUFFER_FORMAT_D16_UNORM = 0x30, + + /** + * Corresponding formats: + * Vulkan: VK_FORMAT_X8_D24_UNORM_PACK32 + * OpenGL ES: GL_DEPTH_COMPONENT24 + */ + AHARDWAREBUFFER_FORMAT_D24_UNORM = 0x31, + + /** + * Corresponding formats: + * Vulkan: VK_FORMAT_D24_UNORM_S8_UINT + * OpenGL ES: GL_DEPTH24_STENCIL8 + */ + AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT = 0x32, + + /** + * Corresponding formats: + * Vulkan: VK_FORMAT_D32_SFLOAT + * OpenGL ES: GL_DEPTH_COMPONENT32F + */ + AHARDWAREBUFFER_FORMAT_D32_FLOAT = 0x33, + + /** + * Corresponding formats: + * Vulkan: VK_FORMAT_D32_SFLOAT_S8_UINT + * OpenGL ES: GL_DEPTH32F_STENCIL8 + */ + AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT = 0x34, + + /** + * Corresponding formats: + * Vulkan: VK_FORMAT_S8_UINT + * OpenGL ES: GL_STENCIL_INDEX8 + */ + AHARDWAREBUFFER_FORMAT_S8_UINT = 0x35, +}; +// clang-format on + +namespace +{ + +// In the Android system: +// - AHardwareBuffer is essentially a typedef of GraphicBuffer. Conversion functions simply +// reinterpret_cast. +// - GraphicBuffer inherits from two base classes, ANativeWindowBuffer and RefBase. +// +// GraphicBuffer implements a getter for ANativeWindowBuffer (getNativeBuffer) by static_casting +// itself to its base class ANativeWindowBuffer. The offset of the ANativeWindowBuffer pointer +// from the GraphicBuffer pointer is 16 bytes. This is likely due to two pointers: The vtable of +// GraphicBuffer and the one pointer member of the RefBase class. +// +// This is not future proof at all. We need to look into getting utilities added to Android to +// perform this cast for us. +constexpr int kAHardwareBufferToANativeWindowBufferOffset = static_cast<int>(sizeof(void *)) * 2; + +template <typename T1, typename T2> +T1 *offsetPointer(T2 *ptr, int bytes) +{ + return reinterpret_cast<T1 *>(reinterpret_cast<intptr_t>(ptr) + bytes); +} + +} // anonymous namespace + +namespace angle +{ + +namespace android +{ + +ANativeWindowBuffer *ClientBufferToANativeWindowBuffer(EGLClientBuffer clientBuffer) +{ + return reinterpret_cast<ANativeWindowBuffer *>(clientBuffer); +} + +void GetANativeWindowBufferProperties(const ANativeWindowBuffer *buffer, + int *width, + int *height, + int *depth, + int *pixelFormat) +{ + *width = buffer->width; + *height = buffer->height; + *depth = static_cast<int>(buffer->layerCount); + *height = buffer->height; + *pixelFormat = buffer->format; +} + +GLenum NativePixelFormatToGLInternalFormat(int pixelFormat) +{ + switch (pixelFormat) + { + case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM: + return GL_RGBA8; + case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM: + return GL_RGB8; + case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM: + return GL_RGB8; + case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM: + return GL_RGB565; + case AHARDWAREBUFFER_FORMAT_B8G8R8A8_UNORM: + return GL_BGRA8_EXT; + case AHARDWAREBUFFER_FORMAT_B5G5R5A1_UNORM: + return GL_RGB5_A1; + case AHARDWAREBUFFER_FORMAT_B4G4R4A4_UNORM: + return GL_RGBA4; + case AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT: + return GL_RGBA16F; + case AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM: + return GL_RGB10_A2; + case AHARDWAREBUFFER_FORMAT_BLOB: + return GL_NONE; + case AHARDWAREBUFFER_FORMAT_D16_UNORM: + return GL_DEPTH_COMPONENT16; + case AHARDWAREBUFFER_FORMAT_D24_UNORM: + return GL_DEPTH_COMPONENT24; + case AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT: + return GL_DEPTH24_STENCIL8; + case AHARDWAREBUFFER_FORMAT_D32_FLOAT: 
+ return GL_DEPTH_COMPONENT32F; + case AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT: + return GL_DEPTH32F_STENCIL8; + case AHARDWAREBUFFER_FORMAT_S8_UINT: + return GL_STENCIL_INDEX8; + default: + return GL_NONE; + } +} + +AHardwareBuffer *ANativeWindowBufferToAHardwareBuffer(ANativeWindowBuffer *windowBuffer) +{ + return offsetPointer<AHardwareBuffer>(windowBuffer, + -kAHardwareBufferToANativeWindowBufferOffset); +} + +EGLClientBuffer AHardwareBufferToClientBuffer(const AHardwareBuffer *hardwareBuffer) +{ + return offsetPointer<EGLClientBuffer>(hardwareBuffer, + kAHardwareBufferToANativeWindowBufferOffset); +} + +} // namespace android +} // namespace angle diff --git a/gfx/angle/checkout/src/common/android_util.h b/gfx/angle/checkout/src/common/android_util.h new file mode 100644 index 0000000000..9826c4ab61 --- /dev/null +++ b/gfx/angle/checkout/src/common/android_util.h @@ -0,0 +1,39 @@ +// +// Copyright 2018 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// android_util.h: Utilities for the using the Android platform + +#ifndef COMMON_ANDROIDUTIL_H_ +#define COMMON_ANDROIDUTIL_H_ + +#include <EGL/egl.h> +#include "angle_gl.h" + +struct ANativeWindowBuffer; +struct AHardwareBuffer; + +namespace angle +{ + +namespace android +{ + +ANativeWindowBuffer *ClientBufferToANativeWindowBuffer(EGLClientBuffer clientBuffer); +EGLClientBuffer AHardwareBufferToClientBuffer(const AHardwareBuffer *hardwareBuffer); + +void GetANativeWindowBufferProperties(const ANativeWindowBuffer *buffer, + int *width, + int *height, + int *depth, + int *pixelFormat); +GLenum NativePixelFormatToGLInternalFormat(int pixelFormat); + +AHardwareBuffer *ANativeWindowBufferToAHardwareBuffer(ANativeWindowBuffer *windowBuffer); + +} // namespace android +} // namespace angle + +#endif // COMMON_ANDROIDUTIL_H_ diff --git a/gfx/angle/checkout/src/common/angleutils.cpp b/gfx/angle/checkout/src/common/angleutils.cpp new file mode 100644 index 0000000000..5b7836d32d --- /dev/null +++ b/gfx/angle/checkout/src/common/angleutils.cpp @@ -0,0 +1,74 @@ +// +// Copyright (c) 2014 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +#include "common/angleutils.h" +#include "common/debug.h" + +#include <stdio.h> + +#include <limits> +#include <vector> + +namespace angle +{ +// dirtyPointer is a special value that will make the comparison with any valid pointer fail and +// force the renderer to re-apply the state. +const uintptr_t DirtyPointer = std::numeric_limits<uintptr_t>::max(); +} // namespace angle + +std::string ArrayString(unsigned int i) +{ + // We assume that UINT_MAX and GL_INVALID_INDEX are equal. + ASSERT(i != UINT_MAX); + + std::stringstream strstr; + strstr << "["; + strstr << i; + strstr << "]"; + return strstr.str(); +} + +std::string ArrayIndexString(const std::vector<unsigned int> &indices) +{ + std::stringstream strstr; + + for (auto indicesIt = indices.rbegin(); indicesIt != indices.rend(); ++indicesIt) + { + // We assume that UINT_MAX and GL_INVALID_INDEX are equal. 
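+        // Reverse iteration prints the outermost index (stored at the back) first.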
+ ASSERT(*indicesIt != UINT_MAX); + strstr << "["; + strstr << (*indicesIt); + strstr << "]"; + } + + return strstr.str(); +} + +size_t FormatStringIntoVector(const char *fmt, va_list vararg, std::vector<char> &outBuffer) +{ + // The state of the va_list passed to vsnprintf is undefined after the call, do a copy in case + // we need to grow the buffer. + va_list varargCopy; + va_copy(varargCopy, vararg); + + // Attempt to just print to the current buffer + int len = vsnprintf(&(outBuffer.front()), outBuffer.size(), fmt, varargCopy); + va_end(varargCopy); + + if (len < 0 || static_cast<size_t>(len) >= outBuffer.size()) + { + // Buffer was not large enough, calculate the required size and resize the buffer + len = vsnprintf(nullptr, 0, fmt, vararg); + outBuffer.resize(len + 1); + + // Print again + va_copy(varargCopy, vararg); + len = vsnprintf(&(outBuffer.front()), outBuffer.size(), fmt, varargCopy); + va_end(varargCopy); + } + ASSERT(len >= 0); + return static_cast<size_t>(len); +} diff --git a/gfx/angle/checkout/src/common/angleutils.h b/gfx/angle/checkout/src/common/angleutils.h new file mode 100644 index 0000000000..09adb58892 --- /dev/null +++ b/gfx/angle/checkout/src/common/angleutils.h @@ -0,0 +1,333 @@ +// +// Copyright (c) 2002-2014 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// angleutils.h: Common ANGLE utilities. + +#ifndef COMMON_ANGLEUTILS_H_ +#define COMMON_ANGLEUTILS_H_ + +#include "common/platform.h" + +#include <climits> +#include <cstdarg> +#include <cstddef> +#include <set> +#include <sstream> +#include <string> +#include <vector> + +// A helper class to disallow copy and assignment operators +namespace angle +{ + +#if defined(ANGLE_ENABLE_D3D9) || defined(ANGLE_ENABLE_D3D11) +using Microsoft::WRL::ComPtr; +#endif // defined(ANGLE_ENABLE_D3D9) || defined(ANGLE_ENABLE_D3D11) + +class NonCopyable +{ + protected: + constexpr NonCopyable() = default; + ~NonCopyable() = default; + + private: + NonCopyable(const NonCopyable &) = delete; + void operator=(const NonCopyable &) = delete; +}; + +extern const uintptr_t DirtyPointer; + +} // namespace angle + +template <typename T, size_t N> +constexpr inline size_t ArraySize(T (&)[N]) +{ + return N; +} + +template <typename T> +class WrappedArray final : angle::NonCopyable +{ + public: + template <size_t N> + constexpr WrappedArray(const T (&data)[N]) : mArray(&data[0]), mSize(N) + {} + + constexpr WrappedArray() : mArray(nullptr), mSize(0) {} + constexpr WrappedArray(const T *data, size_t size) : mArray(data), mSize(size) {} + + WrappedArray(WrappedArray &&other) : WrappedArray() + { + std::swap(mArray, other.mArray); + std::swap(mSize, other.mSize); + } + + ~WrappedArray() {} + + constexpr const T *get() const { return mArray; } + constexpr size_t size() const { return mSize; } + + private: + const T *mArray; + size_t mSize; +}; + +template <typename T, unsigned int N> +void SafeRelease(T (&resourceBlock)[N]) +{ + for (unsigned int i = 0; i < N; i++) + { + SafeRelease(resourceBlock[i]); + } +} + +template <typename T> +void SafeRelease(T &resource) +{ + if (resource) + { + resource->Release(); + resource = nullptr; + } +} + +template <typename T> +void SafeDelete(T *&resource) +{ + delete resource; + resource = nullptr; +} + +template <typename T> +void SafeDeleteContainer(T &resource) +{ + for (auto &element : resource) + { + SafeDelete(element); + } + resource.clear(); +} + +template <typename T> +void 
SafeDeleteArray(T *&resource) +{ + delete[] resource; + resource = nullptr; +} + +// Provide a less-than function for comparing structs +// Note: struct memory must be initialized to zero, because of packing gaps +template <typename T> +inline bool StructLessThan(const T &a, const T &b) +{ + return (memcmp(&a, &b, sizeof(T)) < 0); +} + +// Provide a less-than function for comparing structs +// Note: struct memory must be initialized to zero, because of packing gaps +template <typename T> +inline bool StructEquals(const T &a, const T &b) +{ + return (memcmp(&a, &b, sizeof(T)) == 0); +} + +template <typename T> +inline void StructZero(T *obj) +{ + memset(obj, 0, sizeof(T)); +} + +template <typename T> +inline bool IsMaskFlagSet(T mask, T flag) +{ + // Handles multibit flags as well + return (mask & flag) == flag; +} + +inline const char *MakeStaticString(const std::string &str) +{ + // On the heap so that no destructor runs on application exit. + static std::set<std::string> *strings = new std::set<std::string>; + std::set<std::string>::iterator it = strings->find(str); + if (it != strings->end()) + { + return it->c_str(); + } + + return strings->insert(str).first->c_str(); +} + +std::string ArrayString(unsigned int i); + +// Indices are stored in vectors with the outermost index in the back. In the output of the function +// the indices are reversed. +std::string ArrayIndexString(const std::vector<unsigned int> &indices); + +inline std::string Str(int i) +{ + std::stringstream strstr; + strstr << i; + return strstr.str(); +} + +size_t FormatStringIntoVector(const char *fmt, va_list vararg, std::vector<char> &buffer); + +template <typename T> +std::string ToString(const T &value) +{ + std::ostringstream o; + o << value; + return o.str(); +} + +// snprintf is not defined with MSVC prior to to msvc14 +#if defined(_MSC_VER) && _MSC_VER < 1900 +# define snprintf _snprintf +#endif + +#define GL_A1RGB5_ANGLEX 0x6AC5 +#define GL_BGRX8_ANGLEX 0x6ABA +#define GL_BGR565_ANGLEX 0x6ABB +#define GL_BGRA4_ANGLEX 0x6ABC +#define GL_BGR5_A1_ANGLEX 0x6ABD +#define GL_INT_64_ANGLEX 0x6ABE +#define GL_UINT_64_ANGLEX 0x6ABF +#define GL_BGRA8_SRGB_ANGLEX 0x6AC0 + +// These are dummy formats used to fit typeless D3D textures that can be bound to EGL pbuffers into +// the format system (for extension EGL_ANGLE_d3d_texture_client_buffer): +#define GL_RGBA8_TYPELESS_ANGLEX 0x6AC1 +#define GL_RGBA8_TYPELESS_SRGB_ANGLEX 0x6AC2 +#define GL_BGRA8_TYPELESS_ANGLEX 0x6AC3 +#define GL_BGRA8_TYPELESS_SRGB_ANGLEX 0x6AC4 + +#define GL_R8_SSCALED_ANGLEX 0x6AC6 +#define GL_RG8_SSCALED_ANGLEX 0x6AC7 +#define GL_RGB8_SSCALED_ANGLEX 0x6AC8 +#define GL_RGBA8_SSCALED_ANGLEX 0x6AC9 +#define GL_R8_USCALED_ANGLEX 0x6ACA +#define GL_RG8_USCALED_ANGLEX 0x6ACB +#define GL_RGB8_USCALED_ANGLEX 0x6ACC +#define GL_RGBA8_USCALED_ANGLEX 0x6ACD + +#define GL_R16_SSCALED_ANGLEX 0x6ACE +#define GL_RG16_SSCALED_ANGLEX 0x6ACF +#define GL_RGB16_SSCALED_ANGLEX 0x6AD0 +#define GL_RGBA16_SSCALED_ANGLEX 0x6AD1 +#define GL_R16_USCALED_ANGLEX 0x6AD2 +#define GL_RG16_USCALED_ANGLEX 0x6AD3 +#define GL_RGB16_USCALED_ANGLEX 0x6AD4 +#define GL_RGBA16_USCALED_ANGLEX 0x6AD5 + +#define GL_R32_SSCALED_ANGLEX 0x6AD6 +#define GL_RG32_SSCALED_ANGLEX 0x6AD7 +#define GL_RGB32_SSCALED_ANGLEX 0x6AD8 +#define GL_RGBA32_SSCALED_ANGLEX 0x6AD9 +#define GL_R32_USCALED_ANGLEX 0x6ADA +#define GL_RG32_USCALED_ANGLEX 0x6ADB +#define GL_RGB32_USCALED_ANGLEX 0x6ADC +#define GL_RGBA32_USCALED_ANGLEX 0x6ADD + +#define GL_R32_SNORM_ANGLEX 0x6ADE +#define GL_RG32_SNORM_ANGLEX 0x6ADF 
+#define GL_RGB32_SNORM_ANGLEX 0x6AE0 +#define GL_RGBA32_SNORM_ANGLEX 0x6AE1 +#define GL_R32_UNORM_ANGLEX 0x6AE2 +#define GL_RG32_UNORM_ANGLEX 0x6AE3 +#define GL_RGB32_UNORM_ANGLEX 0x6AE4 +#define GL_RGBA32_UNORM_ANGLEX 0x6AE5 + +#define GL_R32_FIXED_ANGLEX 0x6AE6 +#define GL_RG32_FIXED_ANGLEX 0x6AE7 +#define GL_RGB32_FIXED_ANGLEX 0x6AE8 +#define GL_RGBA32_FIXED_ANGLEX 0x6AE9 + +#define GL_RGB10_A2_SINT_ANGLEX 0x6AEA +#define GL_RGB10_A2_SNORM_ANGLEX 0x6AEB +#define GL_RGB10_A2_SSCALED_ANGLEX 0x6AEC +#define GL_RGB10_A2_USCALED_ANGLEX 0x6AED + +#define ANGLE_CHECK_GL_ALLOC(context, result) \ + ANGLE_CHECK(context, result, "Failed to allocate host memory", GL_OUT_OF_MEMORY) + +#define ANGLE_CHECK_GL_MATH(context, result) \ + ANGLE_CHECK(context, result, "Integer overflow.", GL_INVALID_OPERATION) + +#define ANGLE_GL_UNREACHABLE(context) \ + UNREACHABLE(); \ + ANGLE_CHECK(context, false, "Unreachable Code.", GL_INVALID_OPERATION) + +// The below inlining code lifted from V8. +#if defined(__clang__) || (defined(__GNUC__) && defined(__has_attribute)) +# define ANGLE_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline)) +# define ANGLE_HAS___FORCEINLINE 0 +#elif defined(_MSC_VER) +# define ANGLE_HAS_ATTRIBUTE_ALWAYS_INLINE 0 +# define ANGLE_HAS___FORCEINLINE 1 +#else +# define ANGLE_HAS_ATTRIBUTE_ALWAYS_INLINE 0 +# define ANGLE_HAS___FORCEINLINE 0 +#endif + +#if defined(NDEBUG) && ANGLE_HAS_ATTRIBUTE_ALWAYS_INLINE +# define ANGLE_INLINE inline __attribute__((always_inline)) +#elif defined(NDEBUG) && ANGLE_HAS___FORCEINLINE +# define ANGLE_INLINE __forceinline +#else +# define ANGLE_INLINE inline +#endif + +#if defined(__clang__) || (defined(__GNUC__) && defined(__has_attribute)) +# if __has_attribute(noinline) +# define ANGLE_NOINLINE __attribute__((noinline)) +# else +# define ANGLE_NOINLINE +# endif +#elif defined(_MSC_VER) +# define ANGLE_NOINLINE __declspec(noinline) +#else +# define ANGLE_NOINLINE +#endif + +#if defined(__clang__) || (defined(__GNUC__) && defined(__has_attribute)) +# if __has_attribute(format) +# define ANGLE_FORMAT_PRINTF(fmt, args) __attribute__((format(__printf__, fmt, args))) +# else +# define ANGLE_FORMAT_PRINTF(fmt, args) +# endif +#else +# define ANGLE_FORMAT_PRINTF(fmt, args) +#endif + +// Format messes up the # inside the macro. +// clang-format off +#ifndef ANGLE_STRINGIFY +# define ANGLE_STRINGIFY(x) #x +#endif +// clang-format on + +#ifndef ANGLE_MACRO_STRINGIFY +# define ANGLE_MACRO_STRINGIFY(x) ANGLE_STRINGIFY(x) +#endif + +// Detect support for C++17 [[nodiscard]] +#if !defined(__has_cpp_attribute) +# define __has_cpp_attribute(name) 0 +#endif // !defined(__has_cpp_attribute) + +#if __has_cpp_attribute(nodiscard) +# define ANGLE_NO_DISCARD [[nodiscard]] +#else +# define ANGLE_NO_DISCARD +#endif // __has_cpp_attribute(nodiscard) + +#if __has_cpp_attribute(maybe_unused) +# define ANGLE_MAYBE_UNUSED [[maybe_unused]] +#else +# define ANGLE_MAYBE_UNUSED +#endif // __has_cpp_attribute(maybe_unused) + +#endif // COMMON_ANGLEUTILS_H_ diff --git a/gfx/angle/checkout/src/common/bitset_utils.h b/gfx/angle/checkout/src/common/bitset_utils.h new file mode 100644 index 0000000000..0d04a8008b --- /dev/null +++ b/gfx/angle/checkout/src/common/bitset_utils.h @@ -0,0 +1,564 @@ +// +// Copyright 2015 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// bitset_utils: +// Bitset-related helper classes, such as a fast iterator to scan for set bits. 
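+// BitSetT packs up to sizeof(BitsT) * 8 bits into one integer and provides a
+// std::bitset-like interface; IterableBitSet wraps std::bitset for larger sizes.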
+// + +#ifndef COMMON_BITSETITERATOR_H_ +#define COMMON_BITSETITERATOR_H_ + +#include <stdint.h> + +#include <bitset> + +#include "common/angleutils.h" +#include "common/debug.h" +#include "common/mathutil.h" +#include "common/platform.h" + +namespace angle +{ +template <typename BitsT, typename ParamT> +constexpr static BitsT Bit(ParamT x) +{ + return (static_cast<BitsT>(1) << static_cast<size_t>(x)); +} + +template <size_t N, typename BitsT, typename ParamT = std::size_t> +class BitSetT final +{ + public: + class Reference final + { + public: + ~Reference() {} + Reference &operator=(bool x) + { + mParent->set(mBit, x); + return *this; + } + explicit operator bool() const { return mParent->test(mBit); } + + private: + friend class BitSetT; + + Reference(BitSetT *parent, ParamT bit) : mParent(parent), mBit(bit) {} + + BitSetT *mParent; + ParamT mBit; + }; + + class Iterator final + { + public: + Iterator(const BitSetT &bits); + Iterator &operator++(); + + bool operator==(const Iterator &other) const; + bool operator!=(const Iterator &other) const; + ParamT operator*() const; + + // These helper functions allow mutating an iterator in-flight. + // They only operate on later bits to ensure we don't iterate the same bit twice. + void resetLaterBit(std::size_t index) + { + ASSERT(index > mCurrentBit); + mBitsCopy.reset(index); + } + + void setLaterBit(std::size_t index) + { + ASSERT(index > mCurrentBit); + mBitsCopy.set(index); + } + + private: + std::size_t getNextBit(); + + BitSetT mBitsCopy; + std::size_t mCurrentBit; + }; + + BitSetT(); + constexpr explicit BitSetT(BitsT value); + + BitSetT(const BitSetT &other); + BitSetT &operator=(const BitSetT &other); + + bool operator==(const BitSetT &other) const; + bool operator!=(const BitSetT &other) const; + + constexpr bool operator[](ParamT pos) const; + Reference operator[](ParamT pos) { return Reference(this, pos); } + + bool test(ParamT pos) const; + + bool all() const; + bool any() const; + bool none() const; + std::size_t count() const; + + constexpr std::size_t size() const { return N; } + + BitSetT &operator&=(const BitSetT &other); + BitSetT &operator|=(const BitSetT &other); + BitSetT &operator^=(const BitSetT &other); + BitSetT operator~() const; + + BitSetT &operator&=(BitsT value); + BitSetT &operator|=(BitsT value); + BitSetT &operator^=(BitsT value); + + BitSetT operator<<(std::size_t pos) const; + BitSetT &operator<<=(std::size_t pos); + BitSetT operator>>(std::size_t pos) const; + BitSetT &operator>>=(std::size_t pos); + + BitSetT &set(); + BitSetT &set(ParamT pos, bool value = true); + + BitSetT &reset(); + BitSetT &reset(ParamT pos); + + BitSetT &flip(); + BitSetT &flip(ParamT pos); + + unsigned long to_ulong() const { return static_cast<unsigned long>(mBits); } + BitsT bits() const { return mBits; } + + Iterator begin() const { return Iterator(*this); } + Iterator end() const { return Iterator(BitSetT()); } + + private: + // Produces a mask of ones up to the "x"th bit. 
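+    // Using x - 1 keeps the shift count below the width of BitsT even when
+    // x == N == sizeof(BitsT) * 8 (a full-width shift would be undefined behavior).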
+ constexpr static BitsT Mask(std::size_t x) + { + return ((Bit<BitsT>(static_cast<ParamT>(x - 1)) - 1) << 1) + 1; + } + + BitsT mBits; +}; + +template <size_t N> +class IterableBitSet : public std::bitset<N> +{ + public: + IterableBitSet() {} + IterableBitSet(const std::bitset<N> &implicitBitSet) : std::bitset<N>(implicitBitSet) {} + + class Iterator final + { + public: + Iterator(const std::bitset<N> &bits); + Iterator &operator++(); + + bool operator==(const Iterator &other) const; + bool operator!=(const Iterator &other) const; + unsigned long operator*() const { return mCurrentBit; } + + // These helper functions allow mutating an iterator in-flight. + // They only operate on later bits to ensure we don't iterate the same bit twice. + void resetLaterBit(std::size_t index) + { + ASSERT(index > mCurrentBit); + mBits.reset(index - mOffset); + } + + void setLaterBit(std::size_t index) + { + ASSERT(index > mCurrentBit); + mBits.set(index - mOffset); + } + + private: + unsigned long getNextBit(); + + static constexpr size_t BitsPerWord = sizeof(uint32_t) * 8; + std::bitset<N> mBits; + unsigned long mCurrentBit; + unsigned long mOffset; + }; + + Iterator begin() const { return Iterator(*this); } + Iterator end() const { return Iterator(std::bitset<N>(0)); } +}; + +template <size_t N> +IterableBitSet<N>::Iterator::Iterator(const std::bitset<N> &bitset) + : mBits(bitset), mCurrentBit(0), mOffset(0) +{ + if (mBits.any()) + { + mCurrentBit = getNextBit(); + } + else + { + mOffset = static_cast<unsigned long>(rx::roundUp(N, BitsPerWord)); + } +} + +template <size_t N> +ANGLE_INLINE typename IterableBitSet<N>::Iterator &IterableBitSet<N>::Iterator::operator++() +{ + ASSERT(mBits.any()); + mBits.set(mCurrentBit - mOffset, 0); + mCurrentBit = getNextBit(); + return *this; +} + +template <size_t N> +bool IterableBitSet<N>::Iterator::operator==(const Iterator &other) const +{ + return mOffset == other.mOffset && mBits == other.mBits; +} + +template <size_t N> +bool IterableBitSet<N>::Iterator::operator!=(const Iterator &other) const +{ + return !(*this == other); +} + +template <size_t N> +unsigned long IterableBitSet<N>::Iterator::getNextBit() +{ + // TODO(jmadill): Use 64-bit scan when possible. 
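+    // Scans one 32-bit word at a time: mask off the low word, return its first set
+    // bit if any, otherwise shift the bitset right by a word and advance mOffset.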
+ static constexpr std::bitset<N> wordMask(std::numeric_limits<uint32_t>::max()); + + while (mOffset < N) + { + uint32_t wordBits = static_cast<uint32_t>((mBits & wordMask).to_ulong()); + if (wordBits != 0) + { + return gl::ScanForward(wordBits) + mOffset; + } + + mBits >>= BitsPerWord; + mOffset += BitsPerWord; + } + return 0; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT>::BitSetT() : mBits(0) +{ + static_assert(N > 0, "Bitset type cannot support zero bits."); + static_assert(N <= sizeof(BitsT) * 8, "Bitset type cannot support a size this large."); +} + +template <size_t N, typename BitsT, typename ParamT> +constexpr BitSetT<N, BitsT, ParamT>::BitSetT(BitsT value) : mBits(value & Mask(N)) +{} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT>::BitSetT(const BitSetT &other) : mBits(other.mBits) +{} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator=(const BitSetT &other) +{ + mBits = other.mBits; + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +bool BitSetT<N, BitsT, ParamT>::operator==(const BitSetT &other) const +{ + return mBits == other.mBits; +} + +template <size_t N, typename BitsT, typename ParamT> +bool BitSetT<N, BitsT, ParamT>::operator!=(const BitSetT &other) const +{ + return mBits != other.mBits; +} + +template <size_t N, typename BitsT, typename ParamT> +constexpr bool BitSetT<N, BitsT, ParamT>::operator[](ParamT pos) const +{ + return test(pos); +} + +template <size_t N, typename BitsT, typename ParamT> +bool BitSetT<N, BitsT, ParamT>::test(ParamT pos) const +{ + return (mBits & Bit<BitsT>(pos)) != 0; +} + +template <size_t N, typename BitsT, typename ParamT> +bool BitSetT<N, BitsT, ParamT>::all() const +{ + ASSERT(mBits == (mBits & Mask(N))); + return mBits == Mask(N); +} + +template <size_t N, typename BitsT, typename ParamT> +bool BitSetT<N, BitsT, ParamT>::any() const +{ + ASSERT(mBits == (mBits & Mask(N))); + return (mBits != 0); +} + +template <size_t N, typename BitsT, typename ParamT> +bool BitSetT<N, BitsT, ParamT>::none() const +{ + ASSERT(mBits == (mBits & Mask(N))); + return (mBits == 0); +} + +template <size_t N, typename BitsT, typename ParamT> +std::size_t BitSetT<N, BitsT, ParamT>::count() const +{ + return gl::BitCount(mBits); +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator&=(const BitSetT &other) +{ + mBits &= other.mBits; + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator|=(const BitSetT &other) +{ + mBits |= other.mBits; + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator^=(const BitSetT &other) +{ + mBits = mBits ^ other.mBits; + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> BitSetT<N, BitsT, ParamT>::operator~() const +{ + return BitSetT<N, BitsT, ParamT>(~mBits & Mask(N)); +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator&=(BitsT value) +{ + mBits &= value; + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator|=(BitsT value) +{ + mBits |= value & Mask(N); + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> 
+BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator^=(BitsT value) +{ + mBits ^= value & Mask(N); + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> BitSetT<N, BitsT, ParamT>::operator<<(std::size_t pos) const +{ + return BitSetT<N, BitsT, ParamT>((mBits << pos) & Mask(N)); +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator<<=(std::size_t pos) +{ + mBits = (mBits << pos & Mask(N)); + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> BitSetT<N, BitsT, ParamT>::operator>>(std::size_t pos) const +{ + return BitSetT<N, BitsT, ParamT>(mBits >> pos); +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::operator>>=(std::size_t pos) +{ + mBits = ((mBits >> pos) & Mask(N)); + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::set() +{ + ASSERT(mBits == (mBits & Mask(N))); + mBits = Mask(N); + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::set(ParamT pos, bool value) +{ + ASSERT(mBits == (mBits & Mask(N))); + if (value) + { + mBits |= Bit<BitsT>(pos) & Mask(N); + } + else + { + reset(pos); + } + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::reset() +{ + ASSERT(mBits == (mBits & Mask(N))); + mBits = 0; + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::reset(ParamT pos) +{ + ASSERT(mBits == (mBits & Mask(N))); + mBits &= ~Bit<BitsT>(pos); + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::flip() +{ + ASSERT(mBits == (mBits & Mask(N))); + mBits ^= Mask(N); + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT> &BitSetT<N, BitsT, ParamT>::flip(ParamT pos) +{ + ASSERT(mBits == (mBits & Mask(N))); + mBits ^= Bit<BitsT>(pos) & Mask(N); + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +BitSetT<N, BitsT, ParamT>::Iterator::Iterator(const BitSetT &bits) : mBitsCopy(bits), mCurrentBit(0) +{ + if (bits.any()) + { + mCurrentBit = getNextBit(); + } +} + +template <size_t N, typename BitsT, typename ParamT> +ANGLE_INLINE typename BitSetT<N, BitsT, ParamT>::Iterator &BitSetT<N, BitsT, ParamT>::Iterator:: +operator++() +{ + ASSERT(mBitsCopy.any()); + mBitsCopy.reset(static_cast<ParamT>(mCurrentBit)); + mCurrentBit = getNextBit(); + return *this; +} + +template <size_t N, typename BitsT, typename ParamT> +bool BitSetT<N, BitsT, ParamT>::Iterator::operator==(const Iterator &other) const +{ + return mBitsCopy == other.mBitsCopy; +} + +template <size_t N, typename BitsT, typename ParamT> +bool BitSetT<N, BitsT, ParamT>::Iterator::operator!=(const Iterator &other) const +{ + return !(*this == other); +} + +template <size_t N, typename BitsT, typename ParamT> +ParamT BitSetT<N, BitsT, ParamT>::Iterator::operator*() const +{ + return static_cast<ParamT>(mCurrentBit); +} + +template <size_t N, typename BitsT, typename ParamT> +std::size_t BitSetT<N, BitsT, ParamT>::Iterator::getNextBit() +{ + if (mBitsCopy.none()) + { + return 0; + } + + return gl::ScanForward(mBitsCopy.mBits); +} + +template <size_t N> +using BitSet32 
= BitSetT<N, uint32_t>; + +// ScanForward for 64-bits requires a 64-bit implementation. +#if defined(ANGLE_IS_64_BIT_CPU) +template <size_t N> +using BitSet64 = BitSetT<N, uint64_t>; +#endif // defined(ANGLE_IS_64_BIT_CPU) + +namespace priv +{ + +template <size_t N, typename T> +using EnableIfBitsFit = typename std::enable_if<N <= sizeof(T) * 8>::type; + +template <size_t N, typename Enable = void> +struct GetBitSet +{ + using Type = IterableBitSet<N>; +}; + +// Prefer 64-bit bitsets on 64-bit CPUs. They seem faster than 32-bit. +#if defined(ANGLE_IS_64_BIT_CPU) +template <size_t N> +struct GetBitSet<N, EnableIfBitsFit<N, uint64_t>> +{ + using Type = BitSet64<N>; +}; +#else +template <size_t N> +struct GetBitSet<N, EnableIfBitsFit<N, uint32_t>> +{ + using Type = BitSet32<N>; +}; +#endif // defined(ANGLE_IS_64_BIT_CPU) + +} // namespace priv + +template <size_t N> +using BitSet = typename priv::GetBitSet<N>::Type; + +} // namespace angle + +template <size_t N, typename BitsT, typename ParamT> +inline angle::BitSetT<N, BitsT, ParamT> operator&(const angle::BitSetT<N, BitsT, ParamT> &lhs, + const angle::BitSetT<N, BitsT, ParamT> &rhs) +{ + angle::BitSetT<N, BitsT, ParamT> result(lhs); + result &= rhs.bits(); + return result; +} + +template <size_t N, typename BitsT, typename ParamT> +inline angle::BitSetT<N, BitsT, ParamT> operator|(const angle::BitSetT<N, BitsT, ParamT> &lhs, + const angle::BitSetT<N, BitsT, ParamT> &rhs) +{ + angle::BitSetT<N, BitsT, ParamT> result(lhs); + result |= rhs.bits(); + return result; +} + +template <size_t N, typename BitsT, typename ParamT> +inline angle::BitSetT<N, BitsT, ParamT> operator^(const angle::BitSetT<N, BitsT, ParamT> &lhs, + const angle::BitSetT<N, BitsT, ParamT> &rhs) +{ + angle::BitSetT<N, BitsT, ParamT> result(lhs); + result ^= rhs.bits(); + return result; +} + +#endif // COMMON_BITSETITERATOR_H_ diff --git a/gfx/angle/checkout/src/common/debug.cpp b/gfx/angle/checkout/src/common/debug.cpp new file mode 100644 index 0000000000..9419096c3b --- /dev/null +++ b/gfx/angle/checkout/src/common/debug.cpp @@ -0,0 +1,285 @@ +// +// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// debug.cpp: Debugging utilities. + +#include "common/debug.h" + +#include <stdarg.h> + +#include <array> +#include <cstdio> +#include <fstream> +#include <mutex> +#include <ostream> +#include <vector> + +#if defined(ANGLE_PLATFORM_ANDROID) +# include <android/log.h> +#endif + +#include "common/Optional.h" +#include "common/angleutils.h" +#include "common/system_utils.h" + +namespace gl +{ + +namespace +{ + +DebugAnnotator *g_debugAnnotator = nullptr; + +std::mutex *g_debugMutex = nullptr; + +constexpr std::array<const char *, LOG_NUM_SEVERITIES> g_logSeverityNames = { + {"EVENT", "INFO", "WARN", "ERR", "FATAL"}}; + +constexpr const char *LogSeverityName(int severity) +{ + return (severity >= 0 && severity < LOG_NUM_SEVERITIES) ? 
g_logSeverityNames[severity] + : "UNKNOWN"; +} + +bool ShouldCreateLogMessage(LogSeverity severity) +{ +#if defined(ANGLE_TRACE_ENABLED) + return true; +#elif defined(ANGLE_ENABLE_ASSERTS) + return severity != LOG_EVENT; +#else + return false; +#endif +} + +} // namespace + +namespace priv +{ + +bool ShouldCreatePlatformLogMessage(LogSeverity severity) +{ +#if defined(ANGLE_TRACE_ENABLED) + return true; +#else + return severity != LOG_EVENT; +#endif +} + +// This is never instantiated, it's just used for EAT_STREAM_PARAMETERS to an object of the correct +// type on the LHS of the unused part of the ternary operator. +std::ostream *gSwallowStream; +} // namespace priv + +bool DebugAnnotationsActive() +{ +#if defined(ANGLE_ENABLE_DEBUG_ANNOTATIONS) + return g_debugAnnotator != nullptr && g_debugAnnotator->getStatus(); +#else + return false; +#endif +} + +bool DebugAnnotationsInitialized() +{ + return g_debugAnnotator != nullptr; +} + +void InitializeDebugAnnotations(DebugAnnotator *debugAnnotator) +{ + UninitializeDebugAnnotations(); + g_debugAnnotator = debugAnnotator; +} + +void UninitializeDebugAnnotations() +{ + // Pointer is not managed. + g_debugAnnotator = nullptr; +} + +void InitializeDebugMutexIfNeeded() +{ + if (g_debugMutex == nullptr) + { + g_debugMutex = new std::mutex(); + } +} + +ScopedPerfEventHelper::ScopedPerfEventHelper(const char *format, ...) : mFunctionName(nullptr) +{ + bool dbgTrace = DebugAnnotationsActive(); +#if !defined(ANGLE_ENABLE_DEBUG_TRACE) + if (!dbgTrace) + { + return; + } +#endif // !ANGLE_ENABLE_DEBUG_TRACE + + va_list vararg; + va_start(vararg, format); + std::vector<char> buffer(512); + size_t len = FormatStringIntoVector(format, vararg, buffer); + ANGLE_LOG(EVENT) << std::string(&buffer[0], len); + // Pull function name from variable args + mFunctionName = va_arg(vararg, const char *); + va_end(vararg); + if (dbgTrace) + { + g_debugAnnotator->beginEvent(mFunctionName, buffer.data()); + } +} + +ScopedPerfEventHelper::~ScopedPerfEventHelper() +{ + if (DebugAnnotationsActive()) + { + g_debugAnnotator->endEvent(mFunctionName); + } +} + +LogMessage::LogMessage(const char *function, int line, LogSeverity severity) + : mFunction(function), mLine(line), mSeverity(severity) +{ + // EVENT() does not require additional function(line) info. 
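+    // Every other severity prefixes the streamed message with "function(line): ".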
+ if (mSeverity != LOG_EVENT) + { + mStream << mFunction << "(" << mLine << "): "; + } +} + +LogMessage::~LogMessage() +{ + std::unique_lock<std::mutex> lock; + if (g_debugMutex != nullptr) + { + lock = std::unique_lock<std::mutex>(*g_debugMutex); + } + + if (DebugAnnotationsInitialized() && (mSeverity >= LOG_INFO)) + { + g_debugAnnotator->logMessage(*this); + } + else + { + Trace(getSeverity(), getMessage().c_str()); + } + + if (mSeverity == LOG_FATAL) + { + if (angle::IsDebuggerAttached()) + { + angle::BreakDebugger(); + } + else + { + ANGLE_CRASH(); + } + } +} + +void Trace(LogSeverity severity, const char *message) +{ + if (!ShouldCreateLogMessage(severity)) + { + return; + } + + std::string str(message); + + if (DebugAnnotationsActive()) + { + + switch (severity) + { + case LOG_EVENT: + // Debugging logging done in ScopedPerfEventHelper + break; + default: + g_debugAnnotator->setMarker(message); + break; + } + } + + if (severity == LOG_FATAL || severity == LOG_ERR || severity == LOG_WARN || + severity == LOG_INFO) + { +#if defined(ANGLE_PLATFORM_ANDROID) + android_LogPriority android_priority = ANDROID_LOG_ERROR; + switch (severity) + { + case LOG_INFO: + android_priority = ANDROID_LOG_INFO; + break; + case LOG_WARN: + android_priority = ANDROID_LOG_WARN; + break; + case LOG_ERR: + android_priority = ANDROID_LOG_ERROR; + break; + case LOG_FATAL: + android_priority = ANDROID_LOG_FATAL; + break; + default: + UNREACHABLE(); + } + __android_log_print(android_priority, "ANGLE", "%s: %s\n", LogSeverityName(severity), + str.c_str()); +#else + // Note: we use fprintf because <iostream> includes static initializers. + fprintf((severity >= LOG_ERR) ? stderr : stdout, "%s: %s\n", LogSeverityName(severity), + str.c_str()); +#endif + } + +#if defined(ANGLE_PLATFORM_WINDOWS) && \ + (defined(ANGLE_ENABLE_DEBUG_TRACE_TO_DEBUGGER) || !defined(NDEBUG)) +# if !defined(ANGLE_ENABLE_DEBUG_TRACE_TO_DEBUGGER) + if (severity >= LOG_ERR) +# endif // !defined(ANGLE_ENABLE_DEBUG_TRACE_TO_DEBUGGER) + { + OutputDebugStringA(str.c_str()); + } +#endif + +#if defined(ANGLE_ENABLE_DEBUG_TRACE) +# if defined(NDEBUG) + if (severity == LOG_EVENT || severity == LOG_WARN || severity == LOG_INFO) + { + return; + } +# endif // defined(NDEBUG) + static std::ofstream file(TRACE_OUTPUT_FILE, std::ofstream::app); + if (file) + { + file << LogSeverityName(severity) << ": " << str << std::endl; + file.flush(); + } +#endif // defined(ANGLE_ENABLE_DEBUG_TRACE) +} + +LogSeverity LogMessage::getSeverity() const +{ + return mSeverity; +} + +std::string LogMessage::getMessage() const +{ + return mStream.str(); +} + +#if defined(ANGLE_PLATFORM_WINDOWS) +priv::FmtHexHelper<HRESULT> FmtHR(HRESULT value) +{ + return priv::FmtHexHelper<HRESULT>("HRESULT: ", value); +} + +priv::FmtHexHelper<DWORD> FmtErr(DWORD value) +{ + return priv::FmtHexHelper<DWORD>("error: ", value); +} +#endif // defined(ANGLE_PLATFORM_WINDOWS) + +} // namespace gl diff --git a/gfx/angle/checkout/src/common/debug.h b/gfx/angle/checkout/src/common/debug.h new file mode 100644 index 0000000000..fb5fda3569 --- /dev/null +++ b/gfx/angle/checkout/src/common/debug.h @@ -0,0 +1,366 @@ +// +// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// debug.h: Debugging utilities. A lot of the logging code is adapted from Chromium's +// base/logging.h. 
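+// ANGLE_LOG(severity) streams messages through LogMessage; the ASSERT, UNIMPLEMENTED and
+// UNREACHABLE macros below are built on top of it.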
+ +#ifndef COMMON_DEBUG_H_ +#define COMMON_DEBUG_H_ + +#include <assert.h> +#include <stdio.h> + +#include <iomanip> +#include <ios> +#include <sstream> +#include <string> + +#include "common/angleutils.h" +#include "common/platform.h" + +#if !defined(TRACE_OUTPUT_FILE) +# define TRACE_OUTPUT_FILE "angle_debug.txt" +#endif + +namespace gl +{ + +// Pairs a D3D begin event with an end event. +class ScopedPerfEventHelper : angle::NonCopyable +{ + public: + ANGLE_FORMAT_PRINTF(2, 3) + ScopedPerfEventHelper(const char *format, ...); + ~ScopedPerfEventHelper(); + + private: + const char *mFunctionName; +}; + +using LogSeverity = int; +// Note: the log severities are used to index into the array of names, +// see g_logSeverityNames. +constexpr LogSeverity LOG_EVENT = 0; +constexpr LogSeverity LOG_INFO = 1; +constexpr LogSeverity LOG_WARN = 2; +constexpr LogSeverity LOG_ERR = 3; +constexpr LogSeverity LOG_FATAL = 4; +constexpr LogSeverity LOG_NUM_SEVERITIES = 5; + +void Trace(LogSeverity severity, const char *message); + +// This class more or less represents a particular log message. You +// create an instance of LogMessage and then stream stuff to it. +// When you finish streaming to it, ~LogMessage is called and the +// full message gets streamed to the appropriate destination. +// +// You shouldn't actually use LogMessage's constructor to log things, +// though. You should use the ERR() and WARN() macros. +class LogMessage : angle::NonCopyable +{ + public: + // Used for ANGLE_LOG(severity). + LogMessage(const char *function, int line, LogSeverity severity); + ~LogMessage(); + std::ostream &stream() { return mStream; } + + LogSeverity getSeverity() const; + std::string getMessage() const; + + private: + const char *mFunction; + const int mLine; + const LogSeverity mSeverity; + + std::ostringstream mStream; +}; + +// Wraps the API/Platform-specific debug annotation functions. +// Also handles redirecting logging destination. +class DebugAnnotator : angle::NonCopyable +{ + public: + DebugAnnotator() {} + virtual ~DebugAnnotator() {} + virtual void beginEvent(const char *eventName, const char *eventMessage) = 0; + virtual void endEvent(const char *eventName) = 0; + virtual void setMarker(const char *markerName) = 0; + virtual bool getStatus() = 0; + // Log Message Handler that gets passed every log message, + // when debug annotations are initialized, + // replacing default handling by LogMessage. + virtual void logMessage(const LogMessage &msg) const = 0; +}; + +void InitializeDebugAnnotations(DebugAnnotator *debugAnnotator); +void UninitializeDebugAnnotations(); +bool DebugAnnotationsActive(); +bool DebugAnnotationsInitialized(); + +void InitializeDebugMutexIfNeeded(); + +namespace priv +{ +// This class is used to explicitly ignore values in the conditional logging macros. This avoids +// compiler warnings like "value computed is not used" and "statement has no effect". +class LogMessageVoidify +{ + public: + LogMessageVoidify() {} + // This has to be an operator with a precedence lower than << but higher than ?: + void operator&(std::ostream &) {} +}; + +extern std::ostream *gSwallowStream; + +// Used by ANGLE_LOG_IS_ON to lazy-evaluate stream arguments. 
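+// When it returns false, ANGLE_LAZY_STREAM short-circuits and the streamed arguments
+// are never evaluated.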
+bool ShouldCreatePlatformLogMessage(LogSeverity severity); + +template <int N, typename T> +std::ostream &FmtHex(std::ostream &os, T value) +{ + os << "0x"; + + std::ios_base::fmtflags oldFlags = os.flags(); + std::streamsize oldWidth = os.width(); + std::ostream::char_type oldFill = os.fill(); + + os << std::hex << std::uppercase << std::setw(N) << std::setfill('0') << value; + + os.flags(oldFlags); + os.width(oldWidth); + os.fill(oldFill); + + return os; +} + +template <typename T> +std::ostream &FmtHexAutoSized(std::ostream &os, T value) +{ + constexpr int N = sizeof(T) * 2; + return priv::FmtHex<N>(os, value); +} + +template <typename T> +class FmtHexHelper +{ + public: + FmtHexHelper(const char *prefix, T value) : mPrefix(prefix), mValue(value) {} + explicit FmtHexHelper(T value) : mPrefix(nullptr), mValue(value) {} + + private: + const char *mPrefix; + T mValue; + + friend std::ostream &operator<<(std::ostream &os, const FmtHexHelper &fmt) + { + if (fmt.mPrefix) + { + os << fmt.mPrefix; + } + return FmtHexAutoSized(os, fmt.mValue); + } +}; + +} // namespace priv + +template <typename T> +priv::FmtHexHelper<T> FmtHex(T value) +{ + return priv::FmtHexHelper<T>(value); +} + +#if defined(ANGLE_PLATFORM_WINDOWS) +priv::FmtHexHelper<HRESULT> FmtHR(HRESULT value); +priv::FmtHexHelper<DWORD> FmtErr(DWORD value); +#endif // defined(ANGLE_PLATFORM_WINDOWS) + +template <typename T> +std::ostream &FmtHex(std::ostream &os, T value) +{ + return priv::FmtHexAutoSized(os, value); +} + +// A few definitions of macros that don't generate much code. These are used +// by ANGLE_LOG(). Since these are used all over our code, it's +// better to have compact code for these operations. +#define COMPACT_ANGLE_LOG_EX_EVENT(ClassName, ...) \ + ::gl::ClassName(__FUNCTION__, __LINE__, ::gl::LOG_EVENT, ##__VA_ARGS__) +#define COMPACT_ANGLE_LOG_EX_INFO(ClassName, ...) \ + ::gl::ClassName(__FUNCTION__, __LINE__, ::gl::LOG_INFO, ##__VA_ARGS__) +#define COMPACT_ANGLE_LOG_EX_WARN(ClassName, ...) \ + ::gl::ClassName(__FUNCTION__, __LINE__, ::gl::LOG_WARN, ##__VA_ARGS__) +#define COMPACT_ANGLE_LOG_EX_ERR(ClassName, ...) \ + ::gl::ClassName(__FUNCTION__, __LINE__, ::gl::LOG_ERR, ##__VA_ARGS__) +#define COMPACT_ANGLE_LOG_EX_FATAL(ClassName, ...) \ + ::gl::ClassName(__FUNCTION__, __LINE__, ::gl::LOG_FATAL, ##__VA_ARGS__) + +#define COMPACT_ANGLE_LOG_EVENT COMPACT_ANGLE_LOG_EX_EVENT(LogMessage) +#define COMPACT_ANGLE_LOG_INFO COMPACT_ANGLE_LOG_EX_INFO(LogMessage) +#define COMPACT_ANGLE_LOG_WARN COMPACT_ANGLE_LOG_EX_WARN(LogMessage) +#define COMPACT_ANGLE_LOG_ERR COMPACT_ANGLE_LOG_EX_ERR(LogMessage) +#define COMPACT_ANGLE_LOG_FATAL COMPACT_ANGLE_LOG_EX_FATAL(LogMessage) + +#define ANGLE_LOG_IS_ON(severity) (::gl::priv::ShouldCreatePlatformLogMessage(::gl::LOG_##severity)) + +// Helper macro which avoids evaluating the arguments to a stream if the condition doesn't hold. +// Condition is evaluated once and only once. +#define ANGLE_LAZY_STREAM(stream, condition) \ + !(condition) ? static_cast<void>(0) : ::gl::priv::LogMessageVoidify() & (stream) + +// We use the preprocessor's merging operator, "##", so that, e.g., +// ANGLE_LOG(EVENT) becomes the token COMPACT_ANGLE_LOG_EVENT. There's some funny +// subtle difference between ostream member streaming functions (e.g., +// ostream::operator<<(int) and ostream non-member streaming functions +// (e.g., ::operator<<(ostream&, string&): it turns out that it's +// impossible to stream something like a string directly to an unnamed +// ostream. 
We employ a neat hack by calling the stream() member +// function of LogMessage which seems to avoid the problem. +#define ANGLE_LOG_STREAM(severity) COMPACT_ANGLE_LOG_##severity.stream() + +#define ANGLE_LOG(severity) ANGLE_LAZY_STREAM(ANGLE_LOG_STREAM(severity), ANGLE_LOG_IS_ON(severity)) + +} // namespace gl + +#if defined(ANGLE_ENABLE_DEBUG_TRACE) || defined(ANGLE_ENABLE_DEBUG_ANNOTATIONS) +# define ANGLE_TRACE_ENABLED +#endif + +#if !defined(NDEBUG) || defined(ANGLE_ENABLE_RELEASE_ASSERTS) +# define ANGLE_ENABLE_ASSERTS +#endif + +#define INFO() ANGLE_LOG(INFO) +#define WARN() ANGLE_LOG(WARN) +#define ERR() ANGLE_LOG(ERR) +#define FATAL() ANGLE_LOG(FATAL) + +// A macro to log a performance event around a scope. +#if defined(ANGLE_TRACE_ENABLED) +# if defined(_MSC_VER) +# define EVENT(message, ...) \ + gl::ScopedPerfEventHelper scopedPerfEventHelper##__LINE__("%s" message "\n", \ + __FUNCTION__, __VA_ARGS__) +# else +# define EVENT(message, ...) \ + gl::ScopedPerfEventHelper scopedPerfEventHelper("%s" message "\n", __FUNCTION__, \ + ##__VA_ARGS__) +# endif // _MSC_VER +#else +# define EVENT(message, ...) (void(0)) +#endif + +#if defined(__GNUC__) +# define ANGLE_CRASH() __builtin_trap() +#else +# define ANGLE_CRASH() ((void)(*(volatile char *)0 = 0)), __assume(0) +#endif + +#if !defined(NDEBUG) +# define ANGLE_ASSERT_IMPL(expression) assert(expression) +#else +// TODO(jmadill): Detect if debugger is attached and break. +# define ANGLE_ASSERT_IMPL(expression) ANGLE_CRASH() +#endif // !defined(NDEBUG) + +// Note that gSwallowStream is used instead of an arbitrary LOG() stream to avoid the creation of an +// object with a non-trivial destructor (LogMessage). On MSVC x86 (checked on 2015 Update 3), this +// causes a few additional pointless instructions to be emitted even at full optimization level, +// even though the : arm of the ternary operator is clearly never executed. Using a simpler object +// to be &'d with Voidify() avoids these extra instructions. Using a simpler POD object with a +// templated operator<< also works to avoid these instructions. However, this causes warnings on +// statically defined implementations of operator<<(std::ostream, ...) in some .cpp files, because +// they become defined-but-unreferenced functions. A reinterpret_cast of 0 to an ostream* also is +// not suitable, because some compilers warn of undefined behavior. +#define ANGLE_EAT_STREAM_PARAMETERS \ + true ? static_cast<void>(0) : ::gl::priv::LogMessageVoidify() & (*::gl::priv::gSwallowStream) + +// A macro asserting a condition and outputting failures to the debug log +#if defined(ANGLE_ENABLE_ASSERTS) +# define ASSERT(expression) \ + (expression ? static_cast<void>(0) \ + : (FATAL() << "\t! Assert failed in " << __FUNCTION__ << " (" << __FILE__ \ + << ":" << __LINE__ << "): " << #expression)) +#else +# define ASSERT(condition) ANGLE_EAT_STREAM_PARAMETERS << !(condition) +#endif // defined(ANGLE_ENABLE_ASSERTS) + +#define UNREACHABLE_IS_NORETURN 0 + +#define ANGLE_UNUSED_VARIABLE(variable) (static_cast<void>(variable)) + +// A macro to indicate unimplemented functionality +#ifndef NOASSERT_UNIMPLEMENTED +# define NOASSERT_UNIMPLEMENTED 1 +#endif + +#if defined(ANGLE_TRACE_ENABLED) || defined(ANGLE_ENABLE_ASSERTS) +# define UNIMPLEMENTED() \ + do \ + { \ + WARN() << "\t! 
Unimplemented: " << __FUNCTION__ << "(" << __FILE__ << ":" << __LINE__ \ + << ")"; \ + ASSERT(NOASSERT_UNIMPLEMENTED); \ + } while (0) + +// A macro for code which is not expected to be reached under valid assumptions +# define UNREACHABLE() \ + do \ + { \ + FATAL() << "\t! Unreachable reached: " << __FUNCTION__ << "(" << __FILE__ << ":" \ + << __LINE__ << ")"; \ + } while (0) +#else +# define UNIMPLEMENTED() \ + do \ + { \ + ASSERT(NOASSERT_UNIMPLEMENTED); \ + } while (0) + +// A macro for code which is not expected to be reached under valid assumptions +# define UNREACHABLE() \ + do \ + { \ + ASSERT(false); \ + } while (0) +#endif // defined(ANGLE_TRACE_ENABLED) || defined(ANGLE_ENABLE_ASSERTS) + +#if defined(ANGLE_PLATFORM_WINDOWS) +# define ANGLE_FUNCTION __FUNCTION__ +#else +# define ANGLE_FUNCTION __func__ +#endif + +// Defining ANGLE_ENABLE_STRUCT_PADDING_WARNINGS will enable warnings when members are added to +// structs to enforce packing. This is helpful for diagnosing unexpected struct sizes when making +// fast cache variables. +#if defined(__clang__) +# define ANGLE_ENABLE_STRUCT_PADDING_WARNINGS \ + _Pragma("clang diagnostic push") _Pragma("clang diagnostic error \"-Wpadded\"") +# define ANGLE_DISABLE_STRUCT_PADDING_WARNINGS _Pragma("clang diagnostic pop") +#elif defined(__GNUC__) +# define ANGLE_ENABLE_STRUCT_PADDING_WARNINGS \ + _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic error \"-Wpadded\"") +# define ANGLE_DISABLE_STRUCT_PADDING_WARNINGS _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +# define ANGLE_ENABLE_STRUCT_PADDING_WARNINGS \ + __pragma(warning(push)) __pragma(warning(error : 4820)) +# define ANGLE_DISABLE_STRUCT_PADDING_WARNINGS __pragma(warning(pop)) +#else +# define ANGLE_ENABLE_STRUCT_PADDING_WARNINGS +# define ANGLE_DISABLE_STRUCT_PADDING_WARNINGS +#endif + +#if defined(__clang__) +# define ANGLE_DISABLE_EXTRA_SEMI_WARNING \ + _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wextra-semi\"") +# define ANGLE_REENABLE_EXTRA_SEMI_WARNING _Pragma("clang diagnostic pop") +#else +# define ANGLE_DISABLE_EXTRA_SEMI_WARNING +# define ANGLE_REENABLE_EXTRA_SEMI_WARNING +#endif + +#endif // COMMON_DEBUG_H_ diff --git a/gfx/angle/checkout/src/common/event_tracer.cpp b/gfx/angle/checkout/src/common/event_tracer.cpp new file mode 100644 index 0000000000..95d1977f5b --- /dev/null +++ b/gfx/angle/checkout/src/common/event_tracer.cpp @@ -0,0 +1,53 @@ +// Copyright (c) 2012 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
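+//
+// event_tracer.cpp: Forwards trace-category queries and trace events to the embedder
+// through the PlatformMethods hooks.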
+ +#include "common/event_tracer.h" + +#include "common/debug.h" + +namespace angle +{ + +const unsigned char *GetTraceCategoryEnabledFlag(PlatformMethods *platform, const char *name) +{ + ASSERT(platform); + + const unsigned char *categoryEnabledFlag = + platform->getTraceCategoryEnabledFlag(platform, name); + if (categoryEnabledFlag != nullptr) + { + return categoryEnabledFlag; + } + + static unsigned char disabled = 0; + return &disabled; +} + +angle::TraceEventHandle AddTraceEvent(PlatformMethods *platform, + char phase, + const unsigned char *categoryGroupEnabled, + const char *name, + unsigned long long id, + int numArgs, + const char **argNames, + const unsigned char *argTypes, + const unsigned long long *argValues, + unsigned char flags) +{ + ASSERT(platform); + + double timestamp = platform->monotonicallyIncreasingTime(platform); + + if (timestamp != 0) + { + angle::TraceEventHandle handle = + platform->addTraceEvent(platform, phase, categoryGroupEnabled, name, id, timestamp, + numArgs, argNames, argTypes, argValues, flags); + return handle; + } + + return static_cast<angle::TraceEventHandle>(0); +} + +} // namespace angle diff --git a/gfx/angle/checkout/src/common/event_tracer.h b/gfx/angle/checkout/src/common/event_tracer.h new file mode 100644 index 0000000000..231ecfdd6d --- /dev/null +++ b/gfx/angle/checkout/src/common/event_tracer.h @@ -0,0 +1,26 @@ +// Copyright (c) 2012 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef COMMON_EVENT_TRACER_H_ +#define COMMON_EVENT_TRACER_H_ + +#include "common/platform.h" +#include "platform/Platform.h" + +namespace angle +{ +const unsigned char *GetTraceCategoryEnabledFlag(PlatformMethods *platform, const char *name); +angle::TraceEventHandle AddTraceEvent(PlatformMethods *platform, + char phase, + const unsigned char *categoryGroupEnabled, + const char *name, + unsigned long long id, + int numArgs, + const char **argNames, + const unsigned char *argTypes, + const unsigned long long *argValues, + unsigned char flags); +} // namespace angle + +#endif // COMMON_EVENT_TRACER_H_ diff --git a/gfx/angle/checkout/src/common/hash_utils.h b/gfx/angle/checkout/src/common/hash_utils.h new file mode 100644 index 0000000000..aec4e7f77c --- /dev/null +++ b/gfx/angle/checkout/src/common/hash_utils.h @@ -0,0 +1,39 @@ +// +// Copyright 2018 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// hash_utils.h: Hashing based helper functions. + +#ifndef COMMON_HASHUTILS_H_ +#define COMMON_HASHUTILS_H_ + +#include "common/debug.h" +#include "common/third_party/xxhash/xxhash.h" + +namespace angle +{ +// Computes a hash of "key". Any data passed to this function must be multiples of +// 4 bytes, since the PMurHash32 method can only operate increments of 4-byte words. +inline std::size_t ComputeGenericHash(const void *key, size_t keySize) +{ + static constexpr unsigned int kSeed = 0xABCDEF98; + + // We can't support "odd" alignments. 
ComputeGenericHash requires aligned types + ASSERT(keySize % 4 == 0); +#if defined(ANGLE_IS_64_BIT_CPU) + return XXH64(key, keySize, kSeed); +#else + return XXH32(key, keySize, kSeed); +#endif // defined(ANGLE_IS_64_BIT_CPU) +} + +template <typename T> +std::size_t ComputeGenericHash(const T &key) +{ + static_assert(sizeof(key) % 4 == 0, "ComputeGenericHash requires aligned types"); + return ComputeGenericHash(&key, sizeof(key)); +} +} // namespace angle + +#endif // COMMON_HASHUTILS_H_ diff --git a/gfx/angle/checkout/src/common/mathutil.cpp b/gfx/angle/checkout/src/common/mathutil.cpp new file mode 100644 index 0000000000..3e3895fcf1 --- /dev/null +++ b/gfx/angle/checkout/src/common/mathutil.cpp @@ -0,0 +1,93 @@ +// +// Copyright (c) 2013 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// mathutil.cpp: Math and bit manipulation functions. + +#include "common/mathutil.h" + +#include <math.h> +#include <algorithm> + +namespace gl +{ + +namespace +{ + +struct RGB9E5Data +{ + unsigned int R : 9; + unsigned int G : 9; + unsigned int B : 9; + unsigned int E : 5; +}; + +// B is the exponent bias (15) +constexpr int g_sharedexp_bias = 15; + +// N is the number of mantissa bits per component (9) +constexpr int g_sharedexp_mantissabits = 9; + +// Emax is the maximum allowed biased exponent value (31) +constexpr int g_sharedexp_maxexponent = 31; + +constexpr float g_sharedexp_max = + ((static_cast<float>(1 << g_sharedexp_mantissabits) - 1) / + static_cast<float>(1 << g_sharedexp_mantissabits)) * + static_cast<float>(1 << (g_sharedexp_maxexponent - g_sharedexp_bias)); + +} // anonymous namespace + +unsigned int convertRGBFloatsTo999E5(float red, float green, float blue) +{ + const float red_c = std::max<float>(0, std::min(g_sharedexp_max, red)); + const float green_c = std::max<float>(0, std::min(g_sharedexp_max, green)); + const float blue_c = std::max<float>(0, std::min(g_sharedexp_max, blue)); + + const float max_c = std::max<float>(std::max<float>(red_c, green_c), blue_c); + const float exp_p = + std::max<float>(-g_sharedexp_bias - 1, floor(log(max_c))) + 1 + g_sharedexp_bias; + const int max_s = static_cast<int>( + floor((max_c / (pow(2.0f, exp_p - g_sharedexp_bias - g_sharedexp_mantissabits))) + 0.5f)); + const int exp_s = + static_cast<int>((max_s < pow(2.0f, g_sharedexp_mantissabits)) ? 
exp_p : exp_p + 1); + + RGB9E5Data output; + output.R = static_cast<unsigned int>( + floor((red_c / (pow(2.0f, exp_s - g_sharedexp_bias - g_sharedexp_mantissabits))) + 0.5f)); + output.G = static_cast<unsigned int>( + floor((green_c / (pow(2.0f, exp_s - g_sharedexp_bias - g_sharedexp_mantissabits))) + 0.5f)); + output.B = static_cast<unsigned int>( + floor((blue_c / (pow(2.0f, exp_s - g_sharedexp_bias - g_sharedexp_mantissabits))) + 0.5f)); + output.E = exp_s; + + return bitCast<unsigned int>(output); +} + +void convert999E5toRGBFloats(unsigned int input, float *red, float *green, float *blue) +{ + const RGB9E5Data *inputData = reinterpret_cast<const RGB9E5Data *>(&input); + + *red = + inputData->R * pow(2.0f, (int)inputData->E - g_sharedexp_bias - g_sharedexp_mantissabits); + *green = + inputData->G * pow(2.0f, (int)inputData->E - g_sharedexp_bias - g_sharedexp_mantissabits); + *blue = + inputData->B * pow(2.0f, (int)inputData->E - g_sharedexp_bias - g_sharedexp_mantissabits); +} + +int BitCountPolyfill(uint32_t bits) +{ + int ones = 0; + while (bits) + { + ones += bool(bits & 1); + bits >>= 1; + } + return ones; +} + +} // namespace gl diff --git a/gfx/angle/checkout/src/common/mathutil.h b/gfx/angle/checkout/src/common/mathutil.h new file mode 100644 index 0000000000..798ddba597 --- /dev/null +++ b/gfx/angle/checkout/src/common/mathutil.h @@ -0,0 +1,1305 @@ +// +// Copyright (c) 2002-2013 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// mathutil.h: Math and bit manipulation functions. + +#ifndef COMMON_MATHUTIL_H_ +#define COMMON_MATHUTIL_H_ + +#include <math.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> +#include <algorithm> +#include <limits> + +#include <anglebase/numerics/safe_math.h> + +#include "common/debug.h" +#include "common/platform.h" + +namespace angle +{ +using base::CheckedNumeric; +using base::IsValueInRangeForNumericType; +} // namespace angle + +namespace gl +{ + +const unsigned int Float32One = 0x3F800000; +const unsigned short Float16One = 0x3C00; + +template <typename T> +inline bool isPow2(T x) +{ + static_assert(std::is_integral<T>::value, "isPow2 must be called on an integer type."); + return (x & (x - 1)) == 0 && (x != 0); +} + +inline int log2(int x) +{ + int r = 0; + while ((x >> r) > 1) + r++; + return r; +} + +inline unsigned int ceilPow2(unsigned int x) +{ + if (x != 0) + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x++; + + return x; +} + +template <typename DestT, typename SrcT> +inline DestT clampCast(SrcT value) +{ + // For floating-point types with denormalization, min returns the minimum positive normalized + // value. To find the value that has no values less than it, use numeric_limits::lowest. 
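+    // Illustrative examples (editorial note, not from the upstream sources):
+    //   clampCast<uint8_t>(300.0f) == 255, clampCast<int8_t>(-1000) == -128,
+    //   and in-range values are simply converted, e.g. clampCast<int>(2.5f) == 2.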
+ constexpr const long double destLo = + static_cast<long double>(std::numeric_limits<DestT>::lowest()); + constexpr const long double destHi = + static_cast<long double>(std::numeric_limits<DestT>::max()); + constexpr const long double srcLo = + static_cast<long double>(std::numeric_limits<SrcT>::lowest()); + constexpr long double srcHi = static_cast<long double>(std::numeric_limits<SrcT>::max()); + + if (destHi < srcHi) + { + DestT destMax = std::numeric_limits<DestT>::max(); + if (value >= static_cast<SrcT>(destMax)) + { + return destMax; + } + } + + if (destLo > srcLo) + { + DestT destLow = std::numeric_limits<DestT>::lowest(); + if (value <= static_cast<SrcT>(destLow)) + { + return destLow; + } + } + + return static_cast<DestT>(value); +} + +// Specialize clampCast for bool->int conversion to avoid MSVS 2015 performance warning when the max +// value is casted to the source type. +template <> +inline unsigned int clampCast(bool value) +{ + return static_cast<unsigned int>(value); +} + +template <> +inline int clampCast(bool value) +{ + return static_cast<int>(value); +} + +template <typename T, typename MIN, typename MAX> +inline T clamp(T x, MIN min, MAX max) +{ + // Since NaNs fail all comparison tests, a NaN value will default to min + return x > min ? (x > max ? max : x) : min; +} + +inline float clamp01(float x) +{ + return clamp(x, 0.0f, 1.0f); +} + +template <const int n> +inline unsigned int unorm(float x) +{ + const unsigned int max = 0xFFFFFFFF >> (32 - n); + + if (x > 1) + { + return max; + } + else if (x < 0) + { + return 0; + } + else + { + return (unsigned int)(max * x + 0.5f); + } +} + +inline bool supportsSSE2() +{ +#if defined(ANGLE_USE_SSE) + static bool checked = false; + static bool supports = false; + + if (checked) + { + return supports; + } + +# if defined(ANGLE_PLATFORM_WINDOWS) && !defined(_M_ARM) && !defined(_M_ARM64) + { + int info[4]; + __cpuid(info, 0); + + if (info[0] >= 1) + { + __cpuid(info, 1); + + supports = (info[3] >> 26) & 1; + } + } +# endif // defined(ANGLE_PLATFORM_WINDOWS) && !defined(_M_ARM) && !defined(_M_ARM64) + checked = true; + return supports; +#else // defined(ANGLE_USE_SSE) + return false; +#endif +} + +template <typename destType, typename sourceType> +destType bitCast(const sourceType &source) +{ + size_t copySize = std::min(sizeof(destType), sizeof(sourceType)); + destType output; + memcpy(&output, &source, copySize); + return output; +} + +inline unsigned short float32ToFloat16(float fp32) +{ + unsigned int fp32i = bitCast<unsigned int>(fp32); + unsigned int sign = (fp32i & 0x80000000) >> 16; + unsigned int abs = fp32i & 0x7FFFFFFF; + + if (abs > 0x7F800000) + { // NaN + return 0x7FFF; + } + else if (abs > 0x47FFEFFF) + { // Infinity + return static_cast<uint16_t>(sign | 0x7C00); + } + else if (abs < 0x38800000) // Denormal + { + unsigned int mantissa = (abs & 0x007FFFFF) | 0x00800000; + int e = 113 - (abs >> 23); + + if (e < 24) + { + abs = mantissa >> e; + } + else + { + abs = 0; + } + + return static_cast<unsigned short>(sign | (abs + 0x00000FFF + ((abs >> 13) & 1)) >> 13); + } + else + { + return static_cast<unsigned short>( + sign | (abs + 0xC8000000 + 0x00000FFF + ((abs >> 13) & 1)) >> 13); + } +} + +float float16ToFloat32(unsigned short h); + +unsigned int convertRGBFloatsTo999E5(float red, float green, float blue); +void convert999E5toRGBFloats(unsigned int input, float *red, float *green, float *blue); + +inline unsigned short float32ToFloat11(float fp32) +{ + const unsigned int float32MantissaMask = 0x7FFFFF; + const 
unsigned int float32ExponentMask = 0x7F800000; + const unsigned int float32SignMask = 0x80000000; + const unsigned int float32ValueMask = ~float32SignMask; + const unsigned int float32ExponentFirstBit = 23; + const unsigned int float32ExponentBias = 127; + + const unsigned short float11Max = 0x7BF; + const unsigned short float11MantissaMask = 0x3F; + const unsigned short float11ExponentMask = 0x7C0; + const unsigned short float11BitMask = 0x7FF; + const unsigned int float11ExponentBias = 14; + + const unsigned int float32Maxfloat11 = 0x477E0000; + const unsigned int float32Minfloat11 = 0x38800000; + + const unsigned int float32Bits = bitCast<unsigned int>(fp32); + const bool float32Sign = (float32Bits & float32SignMask) == float32SignMask; + + unsigned int float32Val = float32Bits & float32ValueMask; + + if ((float32Val & float32ExponentMask) == float32ExponentMask) + { + // INF or NAN + if ((float32Val & float32MantissaMask) != 0) + { + return float11ExponentMask | + (((float32Val >> 17) | (float32Val >> 11) | (float32Val >> 6) | (float32Val)) & + float11MantissaMask); + } + else if (float32Sign) + { + // -INF is clamped to 0 since float11 is positive only + return 0; + } + else + { + return float11ExponentMask; + } + } + else if (float32Sign) + { + // float11 is positive only, so clamp to zero + return 0; + } + else if (float32Val > float32Maxfloat11) + { + // The number is too large to be represented as a float11, set to max + return float11Max; + } + else + { + if (float32Val < float32Minfloat11) + { + // The number is too small to be represented as a normalized float11 + // Convert it to a denormalized value. + const unsigned int shift = (float32ExponentBias - float11ExponentBias) - + (float32Val >> float32ExponentFirstBit); + float32Val = + ((1 << float32ExponentFirstBit) | (float32Val & float32MantissaMask)) >> shift; + } + else + { + // Rebias the exponent to represent the value as a normalized float11 + float32Val += 0xC8000000; + } + + return ((float32Val + 0xFFFF + ((float32Val >> 17) & 1)) >> 17) & float11BitMask; + } +} + +inline unsigned short float32ToFloat10(float fp32) +{ + const unsigned int float32MantissaMask = 0x7FFFFF; + const unsigned int float32ExponentMask = 0x7F800000; + const unsigned int float32SignMask = 0x80000000; + const unsigned int float32ValueMask = ~float32SignMask; + const unsigned int float32ExponentFirstBit = 23; + const unsigned int float32ExponentBias = 127; + + const unsigned short float10Max = 0x3DF; + const unsigned short float10MantissaMask = 0x1F; + const unsigned short float10ExponentMask = 0x3E0; + const unsigned short float10BitMask = 0x3FF; + const unsigned int float10ExponentBias = 14; + + const unsigned int float32Maxfloat10 = 0x477C0000; + const unsigned int float32Minfloat10 = 0x38800000; + + const unsigned int float32Bits = bitCast<unsigned int>(fp32); + const bool float32Sign = (float32Bits & float32SignMask) == float32SignMask; + + unsigned int float32Val = float32Bits & float32ValueMask; + + if ((float32Val & float32ExponentMask) == float32ExponentMask) + { + // INF or NAN + if ((float32Val & float32MantissaMask) != 0) + { + return float10ExponentMask | + (((float32Val >> 18) | (float32Val >> 13) | (float32Val >> 3) | (float32Val)) & + float10MantissaMask); + } + else if (float32Sign) + { + // -INF is clamped to 0 since float11 is positive only + return 0; + } + else + { + return float10ExponentMask; + } + } + else if (float32Sign) + { + // float10 is positive only, so clamp to zero + return 0; + } + else if (float32Val > 
float32Maxfloat10) + { + // The number is too large to be represented as a float11, set to max + return float10Max; + } + else + { + if (float32Val < float32Minfloat10) + { + // The number is too small to be represented as a normalized float11 + // Convert it to a denormalized value. + const unsigned int shift = (float32ExponentBias - float10ExponentBias) - + (float32Val >> float32ExponentFirstBit); + float32Val = + ((1 << float32ExponentFirstBit) | (float32Val & float32MantissaMask)) >> shift; + } + else + { + // Rebias the exponent to represent the value as a normalized float11 + float32Val += 0xC8000000; + } + + return ((float32Val + 0x1FFFF + ((float32Val >> 18) & 1)) >> 18) & float10BitMask; + } +} + +inline float float11ToFloat32(unsigned short fp11) +{ + unsigned short exponent = (fp11 >> 6) & 0x1F; + unsigned short mantissa = fp11 & 0x3F; + + if (exponent == 0x1F) + { + // INF or NAN + return bitCast<float>(0x7f800000 | (mantissa << 17)); + } + else + { + if (exponent != 0) + { + // normalized + } + else if (mantissa != 0) + { + // The value is denormalized + exponent = 1; + + do + { + exponent--; + mantissa <<= 1; + } while ((mantissa & 0x40) == 0); + + mantissa = mantissa & 0x3F; + } + else // The value is zero + { + exponent = static_cast<unsigned short>(-112); + } + + return bitCast<float>(((exponent + 112) << 23) | (mantissa << 17)); + } +} + +inline float float10ToFloat32(unsigned short fp11) +{ + unsigned short exponent = (fp11 >> 5) & 0x1F; + unsigned short mantissa = fp11 & 0x1F; + + if (exponent == 0x1F) + { + // INF or NAN + return bitCast<float>(0x7f800000 | (mantissa << 17)); + } + else + { + if (exponent != 0) + { + // normalized + } + else if (mantissa != 0) + { + // The value is denormalized + exponent = 1; + + do + { + exponent--; + mantissa <<= 1; + } while ((mantissa & 0x20) == 0); + + mantissa = mantissa & 0x1F; + } + else // The value is zero + { + exponent = static_cast<unsigned short>(-112); + } + + return bitCast<float>(((exponent + 112) << 23) | (mantissa << 18)); + } +} + +// Convers to and from float and 16.16 fixed point format. 
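+// Illustrative examples (editorial note): FloatToFixed(1.5f) == 0x00018000 and
+// FixedToFloat(0x00018000) == 1.5f, since 16.16 fixed point scales by 65536.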
+ +inline float FixedToFloat(uint32_t fixedInput) +{ + return static_cast<float>(fixedInput) / 65536.0f; +} + +inline uint32_t FloatToFixed(float floatInput) +{ + static constexpr uint32_t kHighest = 32767 * 65536 + 65535; + static constexpr uint32_t kLowest = static_cast<uint32_t>(-32768 * 65536 + 65535); + + if (floatInput > 32767.65535) + { + return kHighest; + } + else if (floatInput < -32768.65535) + { + return kLowest; + } + else + { + return static_cast<uint32_t>(floatInput * 65536); + } +} + +template <typename T> +inline float normalizedToFloat(T input) +{ + static_assert(std::numeric_limits<T>::is_integer, "T must be an integer."); + + if (sizeof(T) > 2) + { + // float has only a 23 bit mantissa, so we need to do the calculation in double precision + constexpr double inverseMax = 1.0 / std::numeric_limits<T>::max(); + return static_cast<float>(input * inverseMax); + } + else + { + constexpr float inverseMax = 1.0f / std::numeric_limits<T>::max(); + return input * inverseMax; + } +} + +template <unsigned int inputBitCount, typename T> +inline float normalizedToFloat(T input) +{ + static_assert(std::numeric_limits<T>::is_integer, "T must be an integer."); + static_assert(inputBitCount < (sizeof(T) * 8), "T must have more bits than inputBitCount."); + + if (inputBitCount > 23) + { + // float has only a 23 bit mantissa, so we need to do the calculation in double precision + constexpr double inverseMax = 1.0 / ((1 << inputBitCount) - 1); + return static_cast<float>(input * inverseMax); + } + else + { + constexpr float inverseMax = 1.0f / ((1 << inputBitCount) - 1); + return input * inverseMax; + } +} + +template <typename T> +inline T floatToNormalized(float input) +{ + if (sizeof(T) > 2) + { + // float has only a 23 bit mantissa, so we need to do the calculation in double precision + return static_cast<T>(std::numeric_limits<T>::max() * static_cast<double>(input) + 0.5); + } + else + { + return static_cast<T>(std::numeric_limits<T>::max() * input + 0.5f); + } +} + +template <unsigned int outputBitCount, typename T> +inline T floatToNormalized(float input) +{ + static_assert(outputBitCount < (sizeof(T) * 8), "T must have more bits than outputBitCount."); + + if (outputBitCount > 23) + { + // float has only a 23 bit mantissa, so we need to do the calculation in double precision + return static_cast<T>(((1 << outputBitCount) - 1) * static_cast<double>(input) + 0.5); + } + else + { + return static_cast<T>(((1 << outputBitCount) - 1) * input + 0.5f); + } +} + +template <unsigned int inputBitCount, unsigned int inputBitStart, typename T> +inline T getShiftedData(T input) +{ + static_assert(inputBitCount + inputBitStart <= (sizeof(T) * 8), + "T must have at least as many bits as inputBitCount + inputBitStart."); + const T mask = (1 << inputBitCount) - 1; + return (input >> inputBitStart) & mask; +} + +template <unsigned int inputBitCount, unsigned int inputBitStart, typename T> +inline T shiftData(T input) +{ + static_assert(inputBitCount + inputBitStart <= (sizeof(T) * 8), + "T must have at least as many bits as inputBitCount + inputBitStart."); + const T mask = (1 << inputBitCount) - 1; + return (input & mask) << inputBitStart; +} + +inline unsigned int CountLeadingZeros(uint32_t x) +{ + // Use binary search to find the amount of leading zeros. 
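+    // Illustrative examples (editorial note): CountLeadingZeros(0x80000000u) == 0,
+    // CountLeadingZeros(1u) == 31, and CountLeadingZeros(0u) == 32.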
+ unsigned int zeros = 32u; + uint32_t y; + + y = x >> 16u; + if (y != 0) + { + zeros = zeros - 16u; + x = y; + } + y = x >> 8u; + if (y != 0) + { + zeros = zeros - 8u; + x = y; + } + y = x >> 4u; + if (y != 0) + { + zeros = zeros - 4u; + x = y; + } + y = x >> 2u; + if (y != 0) + { + zeros = zeros - 2u; + x = y; + } + y = x >> 1u; + if (y != 0) + { + return zeros - 2u; + } + return zeros - x; +} + +inline unsigned char average(unsigned char a, unsigned char b) +{ + return ((a ^ b) >> 1) + (a & b); +} + +inline signed char average(signed char a, signed char b) +{ + return ((short)a + (short)b) / 2; +} + +inline unsigned short average(unsigned short a, unsigned short b) +{ + return ((a ^ b) >> 1) + (a & b); +} + +inline signed short average(signed short a, signed short b) +{ + return ((int)a + (int)b) / 2; +} + +inline unsigned int average(unsigned int a, unsigned int b) +{ + return ((a ^ b) >> 1) + (a & b); +} + +inline int average(int a, int b) +{ + long long average = (static_cast<long long>(a) + static_cast<long long>(b)) / 2ll; + return static_cast<int>(average); +} + +inline float average(float a, float b) +{ + return (a + b) * 0.5f; +} + +inline unsigned short averageHalfFloat(unsigned short a, unsigned short b) +{ + return float32ToFloat16((float16ToFloat32(a) + float16ToFloat32(b)) * 0.5f); +} + +inline unsigned int averageFloat11(unsigned int a, unsigned int b) +{ + return float32ToFloat11((float11ToFloat32(static_cast<unsigned short>(a)) + + float11ToFloat32(static_cast<unsigned short>(b))) * + 0.5f); +} + +inline unsigned int averageFloat10(unsigned int a, unsigned int b) +{ + return float32ToFloat10((float10ToFloat32(static_cast<unsigned short>(a)) + + float10ToFloat32(static_cast<unsigned short>(b))) * + 0.5f); +} + +template <typename T> +class Range +{ + public: + Range() {} + Range(T lo, T hi) : mLow(lo), mHigh(hi) {} + + T length() const { return (empty() ? 0 : (mHigh - mLow)); } + + bool intersects(Range<T> other) + { + if (mLow <= other.mLow) + { + return other.mLow < mHigh; + } + else + { + return mLow < other.mHigh; + } + } + + // Assumes that end is non-inclusive.. for example, extending to 5 will make "end" 6. + void extend(T value) + { + mLow = value < mLow ? value : mLow; + mHigh = value >= mHigh ? (value + 1) : mHigh; + } + + bool empty() const { return mHigh <= mLow; } + + bool contains(T value) const { return value >= mLow && value < mHigh; } + + class Iterator final + { + public: + Iterator(T value) : mCurrent(value) {} + + Iterator &operator++() + { + mCurrent++; + return *this; + } + bool operator==(const Iterator &other) const { return mCurrent == other.mCurrent; } + bool operator!=(const Iterator &other) const { return mCurrent != other.mCurrent; } + T operator*() const { return mCurrent; } + + private: + T mCurrent; + }; + + Iterator begin() const { return Iterator(mLow); } + + Iterator end() const { return Iterator(mHigh); } + + T low() const { return mLow; } + T high() const { return mHigh; } + + void invalidate() + { + mLow = std::numeric_limits<T>::max(); + mHigh = std::numeric_limits<T>::min(); + } + + private: + T mLow; + T mHigh; +}; + +typedef Range<int> RangeI; +typedef Range<unsigned int> RangeUI; + +struct IndexRange +{ + struct Undefined + {}; + IndexRange(Undefined) {} + IndexRange() : IndexRange(0, 0, 0) {} + IndexRange(size_t start_, size_t end_, size_t vertexIndexCount_) + : start(start_), end(end_), vertexIndexCount(vertexIndexCount_) + { + ASSERT(start <= end); + } + + // Number of vertices in the range. 
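+    // Illustrative example (editorial note): start == 2, end == 5 gives vertexCount() == 4.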
+ size_t vertexCount() const { return (end - start) + 1; } + + // Inclusive range of indices that are not primitive restart + size_t start; + size_t end; + + // Number of non-primitive restart indices + size_t vertexIndexCount; +}; + +// Combine a floating-point value representing a mantissa (x) and an integer exponent (exp) into a +// floating-point value. As in GLSL ldexp() built-in. +inline float Ldexp(float x, int exp) +{ + if (exp > 128) + { + return std::numeric_limits<float>::infinity(); + } + if (exp < -126) + { + return 0.0f; + } + double result = static_cast<double>(x) * std::pow(2.0, static_cast<double>(exp)); + return static_cast<float>(result); +} + +// First, both normalized floating-point values are converted into 16-bit integer values. +// Then, the results are packed into the returned 32-bit unsigned integer. +// The first float value will be written to the least significant bits of the output; +// the last float value will be written to the most significant bits. +// The conversion of each value to fixed point is done as follows : +// packSnorm2x16 : round(clamp(c, -1, +1) * 32767.0) +inline uint32_t packSnorm2x16(float f1, float f2) +{ + int16_t leastSignificantBits = static_cast<int16_t>(roundf(clamp(f1, -1.0f, 1.0f) * 32767.0f)); + int16_t mostSignificantBits = static_cast<int16_t>(roundf(clamp(f2, -1.0f, 1.0f) * 32767.0f)); + return static_cast<uint32_t>(mostSignificantBits) << 16 | + (static_cast<uint32_t>(leastSignificantBits) & 0xFFFF); +} + +// First, unpacks a single 32-bit unsigned integer u into a pair of 16-bit unsigned integers. Then, +// each component is converted to a normalized floating-point value to generate the returned two +// float values. The first float value will be extracted from the least significant bits of the +// input; the last float value will be extracted from the most-significant bits. The conversion for +// unpacked fixed-point value to floating point is done as follows: unpackSnorm2x16 : clamp(f / +// 32767.0, -1, +1) +inline void unpackSnorm2x16(uint32_t u, float *f1, float *f2) +{ + int16_t leastSignificantBits = static_cast<int16_t>(u & 0xFFFF); + int16_t mostSignificantBits = static_cast<int16_t>(u >> 16); + *f1 = clamp(static_cast<float>(leastSignificantBits) / 32767.0f, -1.0f, 1.0f); + *f2 = clamp(static_cast<float>(mostSignificantBits) / 32767.0f, -1.0f, 1.0f); +} + +// First, both normalized floating-point values are converted into 16-bit integer values. +// Then, the results are packed into the returned 32-bit unsigned integer. +// The first float value will be written to the least significant bits of the output; +// the last float value will be written to the most significant bits. +// The conversion of each value to fixed point is done as follows: +// packUnorm2x16 : round(clamp(c, 0, +1) * 65535.0) +inline uint32_t packUnorm2x16(float f1, float f2) +{ + uint16_t leastSignificantBits = static_cast<uint16_t>(roundf(clamp(f1, 0.0f, 1.0f) * 65535.0f)); + uint16_t mostSignificantBits = static_cast<uint16_t>(roundf(clamp(f2, 0.0f, 1.0f) * 65535.0f)); + return static_cast<uint32_t>(mostSignificantBits) << 16 | + static_cast<uint32_t>(leastSignificantBits); +} + +// First, unpacks a single 32-bit unsigned integer u into a pair of 16-bit unsigned integers. Then, +// each component is converted to a normalized floating-point value to generate the returned two +// float values. The first float value will be extracted from the least significant bits of the +// input; the last float value will be extracted from the most-significant bits. 
The conversion for +// unpacked fixed-point value to floating point is done as follows: unpackUnorm2x16 : f / 65535.0 +inline void unpackUnorm2x16(uint32_t u, float *f1, float *f2) +{ + uint16_t leastSignificantBits = static_cast<uint16_t>(u & 0xFFFF); + uint16_t mostSignificantBits = static_cast<uint16_t>(u >> 16); + *f1 = static_cast<float>(leastSignificantBits) / 65535.0f; + *f2 = static_cast<float>(mostSignificantBits) / 65535.0f; +} + +// Helper functions intended to be used only here. +namespace priv +{ + +inline uint8_t ToPackedUnorm8(float f) +{ + return static_cast<uint8_t>(roundf(clamp(f, 0.0f, 1.0f) * 255.0f)); +} + +inline int8_t ToPackedSnorm8(float f) +{ + return static_cast<int8_t>(roundf(clamp(f, -1.0f, 1.0f) * 127.0f)); +} + +} // namespace priv + +// Packs 4 normalized unsigned floating-point values to a single 32-bit unsigned integer. Works +// similarly to packUnorm2x16. The floats are clamped to the range 0.0 to 1.0, and written to the +// unsigned integer starting from the least significant bits. +inline uint32_t PackUnorm4x8(float f1, float f2, float f3, float f4) +{ + uint8_t bits[4]; + bits[0] = priv::ToPackedUnorm8(f1); + bits[1] = priv::ToPackedUnorm8(f2); + bits[2] = priv::ToPackedUnorm8(f3); + bits[3] = priv::ToPackedUnorm8(f4); + uint32_t result = 0u; + for (int i = 0; i < 4; ++i) + { + int shift = i * 8; + result |= (static_cast<uint32_t>(bits[i]) << shift); + } + return result; +} + +// Unpacks 4 normalized unsigned floating-point values from a single 32-bit unsigned integer into f. +// Works similarly to unpackUnorm2x16. The floats are unpacked starting from the least significant +// bits. +inline void UnpackUnorm4x8(uint32_t u, float *f) +{ + for (int i = 0; i < 4; ++i) + { + int shift = i * 8; + uint8_t bits = static_cast<uint8_t>((u >> shift) & 0xFF); + f[i] = static_cast<float>(bits) / 255.0f; + } +} + +// Packs 4 normalized signed floating-point values to a single 32-bit unsigned integer. The floats +// are clamped to the range -1.0 to 1.0, and written to the unsigned integer starting from the least +// significant bits. +inline uint32_t PackSnorm4x8(float f1, float f2, float f3, float f4) +{ + int8_t bits[4]; + bits[0] = priv::ToPackedSnorm8(f1); + bits[1] = priv::ToPackedSnorm8(f2); + bits[2] = priv::ToPackedSnorm8(f3); + bits[3] = priv::ToPackedSnorm8(f4); + uint32_t result = 0u; + for (int i = 0; i < 4; ++i) + { + int shift = i * 8; + result |= ((static_cast<uint32_t>(bits[i]) & 0xFF) << shift); + } + return result; +} + +// Unpacks 4 normalized signed floating-point values from a single 32-bit unsigned integer into f. +// Works similarly to unpackSnorm2x16. The floats are unpacked starting from the least significant +// bits, and clamped to the range -1.0 to 1.0. +inline void UnpackSnorm4x8(uint32_t u, float *f) +{ + for (int i = 0; i < 4; ++i) + { + int shift = i * 8; + int8_t bits = static_cast<int8_t>((u >> shift) & 0xFF); + f[i] = clamp(static_cast<float>(bits) / 127.0f, -1.0f, 1.0f); + } +} + +// Returns an unsigned integer obtained by converting the two floating-point values to the 16-bit +// floating-point representation found in the OpenGL ES Specification, and then packing these +// two 16-bit integers into a 32-bit unsigned integer. +// f1: The 16 least-significant bits of the result; +// f2: The 16 most-significant bits. 
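+// Illustrative example (editorial note): packHalf2x16(1.0f, -2.0f) == 0xC0003C00,
+// since 1.0f maps to half 0x3C00 and -2.0f maps to half 0xC000.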
+inline uint32_t packHalf2x16(float f1, float f2) +{ + uint16_t leastSignificantBits = static_cast<uint16_t>(float32ToFloat16(f1)); + uint16_t mostSignificantBits = static_cast<uint16_t>(float32ToFloat16(f2)); + return static_cast<uint32_t>(mostSignificantBits) << 16 | + static_cast<uint32_t>(leastSignificantBits); +} + +// Returns two floating-point values obtained by unpacking a 32-bit unsigned integer into a pair of +// 16-bit values, interpreting those values as 16-bit floating-point numbers according to the OpenGL +// ES Specification, and converting them to 32-bit floating-point values. The first float value is +// obtained from the 16 least-significant bits of u; the second component is obtained from the 16 +// most-significant bits of u. +inline void unpackHalf2x16(uint32_t u, float *f1, float *f2) +{ + uint16_t leastSignificantBits = static_cast<uint16_t>(u & 0xFFFF); + uint16_t mostSignificantBits = static_cast<uint16_t>(u >> 16); + + *f1 = float16ToFloat32(leastSignificantBits); + *f2 = float16ToFloat32(mostSignificantBits); +} + +inline uint8_t sRGBToLinear(uint8_t srgbValue) +{ + float value = srgbValue / 255.0f; + if (value <= 0.04045f) + { + value = value / 12.92f; + } + else + { + value = std::pow((value + 0.055f) / 1.055f, 2.4f); + } + return static_cast<uint8_t>(clamp(value * 255.0f + 0.5f, 0.0f, 255.0f)); +} + +inline uint8_t linearToSRGB(uint8_t linearValue) +{ + float value = linearValue / 255.0f; + if (value <= 0.0f) + { + value = 0.0f; + } + else if (value < 0.0031308f) + { + value = value * 12.92f; + } + else if (value < 1.0f) + { + value = std::pow(value, 0.41666f) * 1.055f - 0.055f; + } + else + { + value = 1.0f; + } + return static_cast<uint8_t>(clamp(value * 255.0f + 0.5f, 0.0f, 255.0f)); +} + +// Reverse the order of the bits. +inline uint32_t BitfieldReverse(uint32_t value) +{ + // TODO(oetuaho@nvidia.com): Optimize this if needed. There don't seem to be compiler intrinsics + // for this, and right now it's not used in performance-critical paths. + uint32_t result = 0u; + for (size_t j = 0u; j < 32u; ++j) + { + result |= (((value >> j) & 1u) << (31u - j)); + } + return result; +} + +// Count the 1 bits. 
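+// Editorial note: BitCount(0xF0u) == 4. The implementations below map to __popcnt
+// on MSVC x86/x64, __builtin_popcount on POSIX platforms, and BitCountPolyfill otherwise.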
+#if defined(_M_IX86) || defined(_M_X64) +# define ANGLE_HAS_BITCOUNT_32 +inline int BitCount(uint32_t bits) +{ + return static_cast<int>(__popcnt(bits)); +} +# if defined(_M_X64) +# define ANGLE_HAS_BITCOUNT_64 +inline int BitCount(uint64_t bits) +{ + return static_cast<int>(__popcnt64(bits)); +} +# endif // defined(_M_X64) +#endif // defined(_M_IX86) || defined(_M_X64) + +#if defined(ANGLE_PLATFORM_POSIX) +# define ANGLE_HAS_BITCOUNT_32 +inline int BitCount(uint32_t bits) +{ + return __builtin_popcount(bits); +} + +# if defined(ANGLE_IS_64_BIT_CPU) +# define ANGLE_HAS_BITCOUNT_64 +inline int BitCount(uint64_t bits) +{ + return __builtin_popcountll(bits); +} +# endif // defined(ANGLE_IS_64_BIT_CPU) +#endif // defined(ANGLE_PLATFORM_POSIX) + +int BitCountPolyfill(uint32_t bits); + +#if !defined(ANGLE_HAS_BITCOUNT_32) +inline int BitCount(const uint32_t bits) +{ + return BitCountPolyfill(bits); +} +#endif // !defined(ANGLE_HAS_BITCOUNT_32) + +#if !defined(ANGLE_HAS_BITCOUNT_64) +inline int BitCount(const uint64_t bits) +{ + return BitCount(static_cast<uint32_t>(bits >> 32)) + BitCount(static_cast<uint32_t>(bits)); +} +#endif // !defined(ANGLE_HAS_BITCOUNT_64) +#undef ANGLE_HAS_BITCOUNT_32 +#undef ANGLE_HAS_BITCOUNT_64 + +inline int BitCount(uint8_t bits) +{ + return BitCount(static_cast<uint32_t>(bits)); +} + +inline int BitCount(uint16_t bits) +{ + return BitCount(static_cast<uint32_t>(bits)); +} + +#if defined(ANGLE_PLATFORM_WINDOWS) +// Return the index of the least significant bit set. Indexing is such that bit 0 is the least +// significant bit. Implemented for different bit widths on different platforms. +inline unsigned long ScanForward(uint32_t bits) +{ + ASSERT(bits != 0u); + unsigned long firstBitIndex = 0ul; + unsigned char ret = _BitScanForward(&firstBitIndex, bits); + ASSERT(ret != 0u); + return firstBitIndex; +} + +# if defined(ANGLE_IS_64_BIT_CPU) +inline unsigned long ScanForward(uint64_t bits) +{ + ASSERT(bits != 0u); + unsigned long firstBitIndex = 0ul; + unsigned char ret = _BitScanForward64(&firstBitIndex, bits); + ASSERT(ret != 0u); + return firstBitIndex; +} +# endif // defined(ANGLE_IS_64_BIT_CPU) +#endif // defined(ANGLE_PLATFORM_WINDOWS) + +#if defined(ANGLE_PLATFORM_POSIX) +inline unsigned long ScanForward(uint32_t bits) +{ + ASSERT(bits != 0u); + return static_cast<unsigned long>(__builtin_ctz(bits)); +} + +# if defined(ANGLE_IS_64_BIT_CPU) +inline unsigned long ScanForward(uint64_t bits) +{ + ASSERT(bits != 0u); + return static_cast<unsigned long>(__builtin_ctzll(bits)); +} +# endif // defined(ANGLE_IS_64_BIT_CPU) +#endif // defined(ANGLE_PLATFORM_POSIX) + +inline unsigned long ScanForward(uint8_t bits) +{ + return ScanForward(static_cast<uint32_t>(bits)); +} + +inline unsigned long ScanForward(uint16_t bits) +{ + return ScanForward(static_cast<uint32_t>(bits)); +} + +// Return the index of the most significant bit set. Indexing is such that bit 0 is the least +// significant bit. +inline unsigned long ScanReverse(unsigned long bits) +{ + ASSERT(bits != 0u); +#if defined(ANGLE_PLATFORM_WINDOWS) + unsigned long lastBitIndex = 0ul; + unsigned char ret = _BitScanReverse(&lastBitIndex, bits); + ASSERT(ret != 0u); + return lastBitIndex; +#elif defined(ANGLE_PLATFORM_POSIX) + return static_cast<unsigned long>(sizeof(unsigned long) * CHAR_BIT - 1 - __builtin_clzl(bits)); +#else +# error Please implement bit-scan-reverse for your platform! +#endif +} + +// Returns -1 on 0, otherwise the index of the least significant 1 bit as in GLSL. 
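+// Illustrative examples (editorial note): FindLSB(0x18u) == 3 and FindLSB(0u) == -1.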
+template <typename T> +int FindLSB(T bits) +{ + static_assert(std::is_integral<T>::value, "must be integral type."); + if (bits == 0u) + { + return -1; + } + else + { + return static_cast<int>(ScanForward(bits)); + } +} + +// Returns -1 on 0, otherwise the index of the most significant 1 bit as in GLSL. +template <typename T> +int FindMSB(T bits) +{ + static_assert(std::is_integral<T>::value, "must be integral type."); + if (bits == 0u) + { + return -1; + } + else + { + return static_cast<int>(ScanReverse(bits)); + } +} + +// Returns whether the argument is Not a Number. +// IEEE 754 single precision NaN representation: Exponent(8 bits) - 255, Mantissa(23 bits) - +// non-zero. +inline bool isNaN(float f) +{ + // Exponent mask: ((1u << 8) - 1u) << 23 = 0x7f800000u + // Mantissa mask: ((1u << 23) - 1u) = 0x7fffffu + return ((bitCast<uint32_t>(f) & 0x7f800000u) == 0x7f800000u) && + (bitCast<uint32_t>(f) & 0x7fffffu); +} + +// Returns whether the argument is infinity. +// IEEE 754 single precision infinity representation: Exponent(8 bits) - 255, Mantissa(23 bits) - +// zero. +inline bool isInf(float f) +{ + // Exponent mask: ((1u << 8) - 1u) << 23 = 0x7f800000u + // Mantissa mask: ((1u << 23) - 1u) = 0x7fffffu + return ((bitCast<uint32_t>(f) & 0x7f800000u) == 0x7f800000u) && + !(bitCast<uint32_t>(f) & 0x7fffffu); +} + +namespace priv +{ +template <unsigned int N, unsigned int R> +struct iSquareRoot +{ + static constexpr unsigned int solve() + { + return (R * R > N) + ? 0 + : ((R * R == N) ? R : static_cast<unsigned int>(iSquareRoot<N, R + 1>::value)); + } + enum Result + { + value = iSquareRoot::solve() + }; +}; + +template <unsigned int N> +struct iSquareRoot<N, N> +{ + enum result + { + value = N + }; +}; + +} // namespace priv + +template <unsigned int N> +constexpr unsigned int iSquareRoot() +{ + return priv::iSquareRoot<N, 1>::value; +} + +// Sum, difference and multiplication operations for signed ints that wrap on 32-bit overflow. +// +// Unsigned types are defined to do arithmetic modulo 2^n in C++. For signed types, overflow +// behavior is undefined. + +template <typename T> +inline T WrappingSum(T lhs, T rhs) +{ + uint32_t lhsUnsigned = static_cast<uint32_t>(lhs); + uint32_t rhsUnsigned = static_cast<uint32_t>(rhs); + return static_cast<T>(lhsUnsigned + rhsUnsigned); +} + +template <typename T> +inline T WrappingDiff(T lhs, T rhs) +{ + uint32_t lhsUnsigned = static_cast<uint32_t>(lhs); + uint32_t rhsUnsigned = static_cast<uint32_t>(rhs); + return static_cast<T>(lhsUnsigned - rhsUnsigned); +} + +inline int32_t WrappingMul(int32_t lhs, int32_t rhs) +{ + int64_t lhsWide = static_cast<int64_t>(lhs); + int64_t rhsWide = static_cast<int64_t>(rhs); + // The multiplication is guaranteed not to overflow. + int64_t resultWide = lhsWide * rhsWide; + // Implement the desired wrapping behavior by masking out the high-order 32 bits. + resultWide = resultWide & 0xffffffffll; + // Casting to a narrower signed type is fine since the casted value is representable in the + // narrower type. 
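+    // Illustrative examples (editorial note): WrappingMul(0x10000, 0x10000) wraps to 0
+    // and WrappingMul(-1, -1) == 1.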
+ return static_cast<int32_t>(resultWide); +} + +inline float scaleScreenDimensionToNdc(float dimensionScreen, float viewportDimension) +{ + return 2.0f * dimensionScreen / viewportDimension; +} + +inline float scaleScreenCoordinateToNdc(float coordinateScreen, float viewportDimension) +{ + float halfShifted = coordinateScreen / viewportDimension; + return 2.0f * (halfShifted - 0.5f); +} + +} // namespace gl + +namespace rx +{ + +template <typename T> +T roundUp(const T value, const T alignment) +{ + auto temp = value + alignment - static_cast<T>(1); + return temp - temp % alignment; +} + +template <typename T> +angle::CheckedNumeric<T> CheckedRoundUp(const T value, const T alignment) +{ + angle::CheckedNumeric<T> checkedValue(value); + angle::CheckedNumeric<T> checkedAlignment(alignment); + return roundUp(checkedValue, checkedAlignment); +} + +inline constexpr unsigned int UnsignedCeilDivide(unsigned int value, unsigned int divisor) +{ + unsigned int divided = value / divisor; + return (divided + ((value % divisor == 0) ? 0 : 1)); +} + +#if defined(__has_builtin) +# define ANGLE_HAS_BUILTIN(x) __has_builtin(x) +#else +# define ANGLE_HAS_BUILTIN(x) 0 +#endif + +#if defined(_MSC_VER) + +# define ANGLE_ROTL(x, y) _rotl(x, y) +# define ANGLE_ROTL64(x, y) _rotl64(x, y) +# define ANGLE_ROTR16(x, y) _rotr16(x, y) + +#elif defined(__clang__) && ANGLE_HAS_BUILTIN(__builtin_rotateleft32) && \ + ANGLE_HAS_BUILTIN(__builtin_rotateleft64) && ANGLE_HAS_BUILTIN(__builtin_rotateright16) + +# define ANGLE_ROTL(x, y) __builtin_rotateleft32(x, y) +# define ANGLE_ROTL64(x, y) __builtin_rotateleft64(x, y) +# define ANGLE_ROTR16(x, y) __builtin_rotateright16(x, y) + +#else + +inline uint32_t RotL(uint32_t x, int8_t r) +{ + return (x << r) | (x >> (32 - r)); +} + +inline uint64_t RotL64(uint64_t x, int8_t r) +{ + return (x << r) | (x >> (64 - r)); +} + +inline uint16_t RotR16(uint16_t x, int8_t r) +{ + return (x >> r) | (x << (16 - r)); +} + +# define ANGLE_ROTL(x, y) ::rx::RotL(x, y) +# define ANGLE_ROTL64(x, y) ::rx::RotL64(x, y) +# define ANGLE_ROTR16(x, y) ::rx::RotR16(x, y) + +#endif // namespace rx + +constexpr unsigned int Log2(unsigned int bytes) +{ + return bytes == 1 ? 0 : (1 + Log2(bytes / 2)); +} +} // namespace rx + +#endif // COMMON_MATHUTIL_H_ diff --git a/gfx/angle/checkout/src/common/matrix_utils.cpp b/gfx/angle/checkout/src/common/matrix_utils.cpp new file mode 100644 index 0000000000..59ab4ca437 --- /dev/null +++ b/gfx/angle/checkout/src/common/matrix_utils.cpp @@ -0,0 +1,285 @@ +// +// Copyright 2018 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// matrix_utils.cpp: Contains implementations for Mat4 methods. 
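+
+// Editorial usage sketch (not part of the upstream sources). Mat4 stores its 16
+// floats in OpenGL's column-major order (dump() prints mElements[0], [4], [8], [12]
+// as the first row), so transforms compose as usual:
+//
+//     angle::Mat4 mvp = angle::Mat4::Perspective(60.0f, 4.0f / 3.0f, 0.1f, 100.0f)
+//                           .product(angle::Mat4::Translate(angle::Vector3(0.f, 0.f, -5.f)));
+//     angle::Vector4 clip = mvp.product(angle::Vector4(1.f, 0.f, 0.f, 1.f));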
+ +#include "common/matrix_utils.h" + +namespace angle +{ + +Mat4::Mat4() : Mat4(1.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 0.f, 1.f) +{} + +Mat4::Mat4(const Matrix<float> generalMatrix) : Matrix(std::vector<float>(16, 0), 4, 4) +{ + unsigned int minCols = std::min((unsigned int)4, generalMatrix.columns()); + unsigned int minRows = std::min((unsigned int)4, generalMatrix.rows()); + for (unsigned int i = 0; i < minCols; i++) + { + for (unsigned int j = 0; j < minRows; j++) + { + mElements[j * minCols + i] = generalMatrix.at(j, i); + } + } +} + +Mat4::Mat4(const std::vector<float> &elements) : Matrix(elements, 4) {} + +Mat4::Mat4(const float *elements) : Matrix(elements, 4) {} + +Mat4::Mat4(float m00, + float m01, + float m02, + float m03, + float m10, + float m11, + float m12, + float m13, + float m20, + float m21, + float m22, + float m23, + float m30, + float m31, + float m32, + float m33) + : Matrix(std::vector<float>(16, 0), 4, 4) +{ + mElements[0] = m00; + mElements[1] = m01; + mElements[2] = m02; + mElements[3] = m03; + mElements[4] = m10; + mElements[5] = m11; + mElements[6] = m12; + mElements[7] = m13; + mElements[8] = m20; + mElements[9] = m21; + mElements[10] = m22; + mElements[11] = m23; + mElements[12] = m30; + mElements[13] = m31; + mElements[14] = m32; + mElements[15] = m33; +} + +// static +Mat4 Mat4::Rotate(float angle, const Vector3 &axis) +{ + auto axis_normalized = axis.normalized(); + float angle_radians = angle * (3.14159265358979323f / 180.0f); + float c = cos(angle_radians); + float ci = 1.f - c; + float s = sin(angle_radians); + + float x = axis_normalized.x(); + float y = axis_normalized.y(); + float z = axis_normalized.z(); + + float x2 = x * x; + float y2 = y * y; + float z2 = z * z; + + float xy = x * y; + float yz = y * z; + float zx = z * x; + + float r00 = c + ci * x2; + float r01 = ci * xy + s * z; + float r02 = ci * zx - s * y; + float r03 = 0.f; + + float r10 = ci * xy - s * z; + float r11 = c + ci * y2; + float r12 = ci * yz + s * x; + float r13 = 0.f; + + float r20 = ci * zx + s * y; + float r21 = ci * yz - s * x; + float r22 = c + ci * z2; + float r23 = 0.f; + + float r30 = 0.f; + float r31 = 0.f; + float r32 = 0.f; + float r33 = 1.f; + + return Mat4(r00, r01, r02, r03, r10, r11, r12, r13, r20, r21, r22, r23, r30, r31, r32, r33); +} + +// static +Mat4 Mat4::Translate(const Vector3 &t) +{ + float r00 = 1.f; + float r01 = 0.f; + float r02 = 0.f; + float r03 = 0.f; + + float r10 = 0.f; + float r11 = 1.f; + float r12 = 0.f; + float r13 = 0.f; + + float r20 = 0.f; + float r21 = 0.f; + float r22 = 1.f; + float r23 = 0.f; + + float r30 = t.x(); + float r31 = t.y(); + float r32 = t.z(); + float r33 = 1.f; + + return Mat4(r00, r01, r02, r03, r10, r11, r12, r13, r20, r21, r22, r23, r30, r31, r32, r33); +} + +// static +Mat4 Mat4::Scale(const Vector3 &s) +{ + float r00 = s.x(); + float r01 = 0.f; + float r02 = 0.f; + float r03 = 0.f; + + float r10 = 0.f; + float r11 = s.y(); + float r12 = 0.f; + float r13 = 0.f; + + float r20 = 0.f; + float r21 = 0.f; + float r22 = s.z(); + float r23 = 0.f; + + float r30 = 0.f; + float r31 = 0.f; + float r32 = 0.f; + float r33 = 1.f; + + return Mat4(r00, r01, r02, r03, r10, r11, r12, r13, r20, r21, r22, r23, r30, r31, r32, r33); +} + +// static +Mat4 Mat4::Frustum(float l, float r, float b, float t, float n, float f) +{ + float nn = 2.f * n; + float fpn = f + n; + float fmn = f - n; + float tpb = t + b; + float tmb = t - b; + float rpl = r + l; + float rml = r - l; + + float r00 = nn / rml; + float r01 = 
0.f; + float r02 = 0.f; + float r03 = 0.f; + + float r10 = 0.f; + float r11 = nn / tmb; + float r12 = 0.f; + float r13 = 0.f; + + float r20 = rpl / rml; + float r21 = tpb / tmb; + float r22 = -fpn / fmn; + float r23 = -1.f; + + float r30 = 0.f; + float r31 = 0.f; + float r32 = -nn * f / fmn; + float r33 = 0.f; + + return Mat4(r00, r01, r02, r03, r10, r11, r12, r13, r20, r21, r22, r23, r30, r31, r32, r33); +} + +// static +Mat4 Mat4::Perspective(float fov, float aspectRatio, float n, float f) +{ + const float frustumHeight = tanf(static_cast<float>(fov / 360.0f * 3.14159265358979323)) * n; + const float frustumWidth = frustumHeight * aspectRatio; + return Frustum(-frustumWidth, frustumWidth, -frustumHeight, frustumHeight, n, f); +} + +// static +Mat4 Mat4::Ortho(float l, float r, float b, float t, float n, float f) +{ + float fpn = f + n; + float fmn = f - n; + float tpb = t + b; + float tmb = t - b; + float rpl = r + l; + float rml = r - l; + + float r00 = 2.f / rml; + float r01 = 0.f; + float r02 = 0.f; + float r03 = 0.f; + + float r10 = 0.f; + float r11 = 2.f / tmb; + float r12 = 0.f; + float r13 = 0.f; + + float r20 = 0.f; + float r21 = 0.f; + float r22 = -2.f / fmn; + float r23 = 0.f; + + float r30 = -rpl / rml; + float r31 = -tpb / tmb; + float r32 = -fpn / fmn; + float r33 = 1.f; + + return Mat4(r00, r01, r02, r03, r10, r11, r12, r13, r20, r21, r22, r23, r30, r31, r32, r33); +} + +Mat4 Mat4::product(const Mat4 &m) +{ + const float *a = mElements.data(); + const float *b = m.mElements.data(); + + return Mat4(a[0] * b[0] + a[4] * b[1] + a[8] * b[2] + a[12] * b[3], + a[1] * b[0] + a[5] * b[1] + a[9] * b[2] + a[13] * b[3], + a[2] * b[0] + a[6] * b[1] + a[10] * b[2] + a[14] * b[3], + a[3] * b[0] + a[7] * b[1] + a[11] * b[2] + a[15] * b[3], + + a[0] * b[4] + a[4] * b[5] + a[8] * b[6] + a[12] * b[7], + a[1] * b[4] + a[5] * b[5] + a[9] * b[6] + a[13] * b[7], + a[2] * b[4] + a[6] * b[5] + a[10] * b[6] + a[14] * b[7], + a[3] * b[4] + a[7] * b[5] + a[11] * b[6] + a[15] * b[7], + + a[0] * b[8] + a[4] * b[9] + a[8] * b[10] + a[12] * b[11], + a[1] * b[8] + a[5] * b[9] + a[9] * b[10] + a[13] * b[11], + a[2] * b[8] + a[6] * b[9] + a[10] * b[10] + a[14] * b[11], + a[3] * b[8] + a[7] * b[9] + a[11] * b[10] + a[15] * b[11], + + a[0] * b[12] + a[4] * b[13] + a[8] * b[14] + a[12] * b[15], + a[1] * b[12] + a[5] * b[13] + a[9] * b[14] + a[13] * b[15], + a[2] * b[12] + a[6] * b[13] + a[10] * b[14] + a[14] * b[15], + a[3] * b[12] + a[7] * b[13] + a[11] * b[14] + a[15] * b[15]); +} + +Vector4 Mat4::product(const Vector4 &b) +{ + return Vector4( + mElements[0] * b.x() + mElements[4] * b.y() + mElements[8] * b.z() + mElements[12] * b.w(), + mElements[1] * b.x() + mElements[5] * b.y() + mElements[9] * b.z() + mElements[13] * b.w(), + mElements[2] * b.x() + mElements[6] * b.y() + mElements[10] * b.z() + mElements[14] * b.w(), + mElements[3] * b.x() + mElements[7] * b.y() + mElements[11] * b.z() + + mElements[15] * b.w()); +} + +void Mat4::dump() +{ + printf("[ %f %f %f %f ]\n", mElements[0], mElements[4], mElements[8], mElements[12]); + printf("[ %f %f %f %f ]\n", mElements[1], mElements[5], mElements[9], mElements[13]); + printf("[ %f %f %f %f ]\n", mElements[2], mElements[6], mElements[10], mElements[14]); + printf("[ %f %f %f %f ]\n", mElements[3], mElements[7], mElements[11], mElements[15]); +} + +} // namespace angle diff --git a/gfx/angle/checkout/src/common/matrix_utils.h b/gfx/angle/checkout/src/common/matrix_utils.h new file mode 100644 index 0000000000..5a6b9680c2 --- /dev/null +++ 
b/gfx/angle/checkout/src/common/matrix_utils.h @@ -0,0 +1,423 @@ +// +// Copyright 2015 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Matrix: +// Utility class implementing various matrix operations. +// Supports matrices with minimum 2 and maximum 4 number of rows/columns. +// +// TODO: Check if we can merge Matrix.h in sample_util with this and replace it with this +// implementation. +// TODO: Rename this file to Matrix.h once we remove Matrix.h in sample_util. + +#ifndef COMMON_MATRIX_UTILS_H_ +#define COMMON_MATRIX_UTILS_H_ + +#include <vector> + +#include "common/debug.h" +#include "common/mathutil.h" +#include "common/vector_utils.h" + +namespace angle +{ + +template <typename T> +class Matrix +{ + public: + Matrix(const std::vector<T> &elements, const unsigned int numRows, const unsigned int numCols) + : mElements(elements), mRows(numRows), mCols(numCols) + { + ASSERT(rows() >= 1 && rows() <= 4); + ASSERT(columns() >= 1 && columns() <= 4); + } + + Matrix(const std::vector<T> &elements, const unsigned int size) + : mElements(elements), mRows(size), mCols(size) + { + ASSERT(rows() >= 1 && rows() <= 4); + ASSERT(columns() >= 1 && columns() <= 4); + } + + Matrix(const T *elements, const unsigned int size) : mRows(size), mCols(size) + { + ASSERT(rows() >= 1 && rows() <= 4); + ASSERT(columns() >= 1 && columns() <= 4); + for (size_t i = 0; i < size * size; i++) + mElements.push_back(elements[i]); + } + + const T &operator()(const unsigned int rowIndex, const unsigned int columnIndex) const + { + ASSERT(rowIndex < mRows); + ASSERT(columnIndex < mCols); + return mElements[rowIndex * columns() + columnIndex]; + } + + T &operator()(const unsigned int rowIndex, const unsigned int columnIndex) + { + ASSERT(rowIndex < mRows); + ASSERT(columnIndex < mCols); + return mElements[rowIndex * columns() + columnIndex]; + } + + const T &at(const unsigned int rowIndex, const unsigned int columnIndex) const + { + ASSERT(rowIndex < mRows); + ASSERT(columnIndex < mCols); + return operator()(rowIndex, columnIndex); + } + + Matrix<T> operator*(const Matrix<T> &m) + { + ASSERT(columns() == m.rows()); + + unsigned int resultRows = rows(); + unsigned int resultCols = m.columns(); + Matrix<T> result(std::vector<T>(resultRows * resultCols), resultRows, resultCols); + for (unsigned int i = 0; i < resultRows; i++) + { + for (unsigned int j = 0; j < resultCols; j++) + { + T tmp = 0.0f; + for (unsigned int k = 0; k < columns(); k++) + tmp += at(i, k) * m(k, j); + result(i, j) = tmp; + } + } + + return result; + } + + void operator*=(const Matrix<T> &m) + { + ASSERT(columns() == m.rows()); + Matrix<T> res = (*this) * m; + size_t numElts = res.elements().size(); + mElements.resize(numElts); + memcpy(mElements.data(), res.data(), numElts * sizeof(float)); + } + + bool operator==(const Matrix<T> &m) const + { + ASSERT(columns() == m.columns()); + ASSERT(rows() == m.rows()); + return mElements == m.elements(); + } + + bool operator!=(const Matrix<T> &m) const { return !(mElements == m.elements()); } + + bool nearlyEqual(T epsilon, const Matrix<T> &m) const + { + ASSERT(columns() == m.columns()); + ASSERT(rows() == m.rows()); + const auto &otherElts = m.elements(); + for (size_t i = 0; i < otherElts.size(); i++) + { + if ((mElements[i] - otherElts[i] > epsilon) && (otherElts[i] - mElements[i] > epsilon)) + return false; + } + return true; + } + + unsigned int size() const + { + ASSERT(rows() == columns()); + 
return rows(); + } + + unsigned int rows() const { return mRows; } + + unsigned int columns() const { return mCols; } + + std::vector<T> elements() const { return mElements; } + T *data() { return mElements.data(); } + + Matrix<T> compMult(const Matrix<T> &mat1) const + { + Matrix result(std::vector<T>(mElements.size()), rows(), columns()); + for (unsigned int i = 0; i < rows(); i++) + { + for (unsigned int j = 0; j < columns(); j++) + { + T lhs = at(i, j); + T rhs = mat1(i, j); + result(i, j) = rhs * lhs; + } + } + + return result; + } + + Matrix<T> outerProduct(const Matrix<T> &mat1) const + { + unsigned int cols = mat1.columns(); + Matrix result(std::vector<T>(rows() * cols), rows(), cols); + for (unsigned int i = 0; i < rows(); i++) + for (unsigned int j = 0; j < cols; j++) + result(i, j) = at(i, 0) * mat1(0, j); + + return result; + } + + Matrix<T> transpose() const + { + Matrix result(std::vector<T>(mElements.size()), columns(), rows()); + for (unsigned int i = 0; i < columns(); i++) + for (unsigned int j = 0; j < rows(); j++) + result(i, j) = at(j, i); + + return result; + } + + T determinant() const + { + ASSERT(rows() == columns()); + + switch (size()) + { + case 2: + return at(0, 0) * at(1, 1) - at(0, 1) * at(1, 0); + + case 3: + return at(0, 0) * at(1, 1) * at(2, 2) + at(0, 1) * at(1, 2) * at(2, 0) + + at(0, 2) * at(1, 0) * at(2, 1) - at(0, 2) * at(1, 1) * at(2, 0) - + at(0, 1) * at(1, 0) * at(2, 2) - at(0, 0) * at(1, 2) * at(2, 1); + + case 4: + { + const float minorMatrices[4][3 * 3] = {{ + at(1, 1), + at(2, 1), + at(3, 1), + at(1, 2), + at(2, 2), + at(3, 2), + at(1, 3), + at(2, 3), + at(3, 3), + }, + { + at(1, 0), + at(2, 0), + at(3, 0), + at(1, 2), + at(2, 2), + at(3, 2), + at(1, 3), + at(2, 3), + at(3, 3), + }, + { + at(1, 0), + at(2, 0), + at(3, 0), + at(1, 1), + at(2, 1), + at(3, 1), + at(1, 3), + at(2, 3), + at(3, 3), + }, + { + at(1, 0), + at(2, 0), + at(3, 0), + at(1, 1), + at(2, 1), + at(3, 1), + at(1, 2), + at(2, 2), + at(3, 2), + }}; + return at(0, 0) * Matrix<T>(minorMatrices[0], 3).determinant() - + at(0, 1) * Matrix<T>(minorMatrices[1], 3).determinant() + + at(0, 2) * Matrix<T>(minorMatrices[2], 3).determinant() - + at(0, 3) * Matrix<T>(minorMatrices[3], 3).determinant(); + } + + default: + UNREACHABLE(); + break; + } + + return T(); + } + + Matrix<T> inverse() const + { + ASSERT(rows() == columns()); + + Matrix<T> cof(std::vector<T>(mElements.size()), rows(), columns()); + switch (size()) + { + case 2: + cof(0, 0) = at(1, 1); + cof(0, 1) = -at(1, 0); + cof(1, 0) = -at(0, 1); + cof(1, 1) = at(0, 0); + break; + + case 3: + cof(0, 0) = at(1, 1) * at(2, 2) - at(2, 1) * at(1, 2); + cof(0, 1) = -(at(1, 0) * at(2, 2) - at(2, 0) * at(1, 2)); + cof(0, 2) = at(1, 0) * at(2, 1) - at(2, 0) * at(1, 1); + cof(1, 0) = -(at(0, 1) * at(2, 2) - at(2, 1) * at(0, 2)); + cof(1, 1) = at(0, 0) * at(2, 2) - at(2, 0) * at(0, 2); + cof(1, 2) = -(at(0, 0) * at(2, 1) - at(2, 0) * at(0, 1)); + cof(2, 0) = at(0, 1) * at(1, 2) - at(1, 1) * at(0, 2); + cof(2, 1) = -(at(0, 0) * at(1, 2) - at(1, 0) * at(0, 2)); + cof(2, 2) = at(0, 0) * at(1, 1) - at(1, 0) * at(0, 1); + break; + + case 4: + cof(0, 0) = at(1, 1) * at(2, 2) * at(3, 3) + at(2, 1) * at(3, 2) * at(1, 3) + + at(3, 1) * at(1, 2) * at(2, 3) - at(1, 1) * at(3, 2) * at(2, 3) - + at(2, 1) * at(1, 2) * at(3, 3) - at(3, 1) * at(2, 2) * at(1, 3); + cof(0, 1) = -(at(1, 0) * at(2, 2) * at(3, 3) + at(2, 0) * at(3, 2) * at(1, 3) + + at(3, 0) * at(1, 2) * at(2, 3) - at(1, 0) * at(3, 2) * at(2, 3) - + at(2, 0) * at(1, 2) * at(3, 3) - at(3, 0) * 
at(2, 2) * at(1, 3)); + cof(0, 2) = at(1, 0) * at(2, 1) * at(3, 3) + at(2, 0) * at(3, 1) * at(1, 3) + + at(3, 0) * at(1, 1) * at(2, 3) - at(1, 0) * at(3, 1) * at(2, 3) - + at(2, 0) * at(1, 1) * at(3, 3) - at(3, 0) * at(2, 1) * at(1, 3); + cof(0, 3) = -(at(1, 0) * at(2, 1) * at(3, 2) + at(2, 0) * at(3, 1) * at(1, 2) + + at(3, 0) * at(1, 1) * at(2, 2) - at(1, 0) * at(3, 1) * at(2, 2) - + at(2, 0) * at(1, 1) * at(3, 2) - at(3, 0) * at(2, 1) * at(1, 2)); + cof(1, 0) = -(at(0, 1) * at(2, 2) * at(3, 3) + at(2, 1) * at(3, 2) * at(0, 3) + + at(3, 1) * at(0, 2) * at(2, 3) - at(0, 1) * at(3, 2) * at(2, 3) - + at(2, 1) * at(0, 2) * at(3, 3) - at(3, 1) * at(2, 2) * at(0, 3)); + cof(1, 1) = at(0, 0) * at(2, 2) * at(3, 3) + at(2, 0) * at(3, 2) * at(0, 3) + + at(3, 0) * at(0, 2) * at(2, 3) - at(0, 0) * at(3, 2) * at(2, 3) - + at(2, 0) * at(0, 2) * at(3, 3) - at(3, 0) * at(2, 2) * at(0, 3); + cof(1, 2) = -(at(0, 0) * at(2, 1) * at(3, 3) + at(2, 0) * at(3, 1) * at(0, 3) + + at(3, 0) * at(0, 1) * at(2, 3) - at(0, 0) * at(3, 1) * at(2, 3) - + at(2, 0) * at(0, 1) * at(3, 3) - at(3, 0) * at(2, 1) * at(0, 3)); + cof(1, 3) = at(0, 0) * at(2, 1) * at(3, 2) + at(2, 0) * at(3, 1) * at(0, 2) + + at(3, 0) * at(0, 1) * at(2, 2) - at(0, 0) * at(3, 1) * at(2, 2) - + at(2, 0) * at(0, 1) * at(3, 2) - at(3, 0) * at(2, 1) * at(0, 2); + cof(2, 0) = at(0, 1) * at(1, 2) * at(3, 3) + at(1, 1) * at(3, 2) * at(0, 3) + + at(3, 1) * at(0, 2) * at(1, 3) - at(0, 1) * at(3, 2) * at(1, 3) - + at(1, 1) * at(0, 2) * at(3, 3) - at(3, 1) * at(1, 2) * at(0, 3); + cof(2, 1) = -(at(0, 0) * at(1, 2) * at(3, 3) + at(1, 0) * at(3, 2) * at(0, 3) + + at(3, 0) * at(0, 2) * at(1, 3) - at(0, 0) * at(3, 2) * at(1, 3) - + at(1, 0) * at(0, 2) * at(3, 3) - at(3, 0) * at(1, 2) * at(0, 3)); + cof(2, 2) = at(0, 0) * at(1, 1) * at(3, 3) + at(1, 0) * at(3, 1) * at(0, 3) + + at(3, 0) * at(0, 1) * at(1, 3) - at(0, 0) * at(3, 1) * at(1, 3) - + at(1, 0) * at(0, 1) * at(3, 3) - at(3, 0) * at(1, 1) * at(0, 3); + cof(2, 3) = -(at(0, 0) * at(1, 1) * at(3, 2) + at(1, 0) * at(3, 1) * at(0, 2) + + at(3, 0) * at(0, 1) * at(1, 2) - at(0, 0) * at(3, 1) * at(1, 2) - + at(1, 0) * at(0, 1) * at(3, 2) - at(3, 0) * at(1, 1) * at(0, 2)); + cof(3, 0) = -(at(0, 1) * at(1, 2) * at(2, 3) + at(1, 1) * at(2, 2) * at(0, 3) + + at(2, 1) * at(0, 2) * at(1, 3) - at(0, 1) * at(2, 2) * at(1, 3) - + at(1, 1) * at(0, 2) * at(2, 3) - at(2, 1) * at(1, 2) * at(0, 3)); + cof(3, 1) = at(0, 0) * at(1, 2) * at(2, 3) + at(1, 0) * at(2, 2) * at(0, 3) + + at(2, 0) * at(0, 2) * at(1, 3) - at(0, 0) * at(2, 2) * at(1, 3) - + at(1, 0) * at(0, 2) * at(2, 3) - at(2, 0) * at(1, 2) * at(0, 3); + cof(3, 2) = -(at(0, 0) * at(1, 1) * at(2, 3) + at(1, 0) * at(2, 1) * at(0, 3) + + at(2, 0) * at(0, 1) * at(1, 3) - at(0, 0) * at(2, 1) * at(1, 3) - + at(1, 0) * at(0, 1) * at(2, 3) - at(2, 0) * at(1, 1) * at(0, 3)); + cof(3, 3) = at(0, 0) * at(1, 1) * at(2, 2) + at(1, 0) * at(2, 1) * at(0, 2) + + at(2, 0) * at(0, 1) * at(1, 2) - at(0, 0) * at(2, 1) * at(1, 2) - + at(1, 0) * at(0, 1) * at(2, 2) - at(2, 0) * at(1, 1) * at(0, 2); + break; + + default: + UNREACHABLE(); + break; + } + + // The inverse of A is the transpose of the cofactor matrix times the reciprocal of the + // determinant of A. + Matrix<T> adjugateMatrix(cof.transpose()); + T det = determinant(); + Matrix<T> result(std::vector<T>(mElements.size()), rows(), columns()); + for (unsigned int i = 0; i < rows(); i++) + for (unsigned int j = 0; j < columns(); j++) + result(i, j) = det ? 
adjugateMatrix(i, j) / det : T(); + + return result; + } + + void setToIdentity() + { + ASSERT(rows() == columns()); + + const auto one = T(1); + const auto zero = T(0); + + for (auto &e : mElements) + e = zero; + + for (unsigned int i = 0; i < rows(); ++i) + { + const auto pos = i * columns() + (i % columns()); + mElements[pos] = one; + } + } + + template <unsigned int Size> + static void setToIdentity(T (&matrix)[Size]) + { + static_assert(gl::iSquareRoot<Size>() != 0, "Matrix is not square."); + + const auto cols = gl::iSquareRoot<Size>(); + const auto one = T(1); + const auto zero = T(0); + + for (auto &e : matrix) + e = zero; + + for (unsigned int i = 0; i < cols; ++i) + { + const auto pos = i * cols + (i % cols); + matrix[pos] = one; + } + } + + protected: + std::vector<T> mElements; + unsigned int mRows; + unsigned int mCols; +}; + +class Mat4 : public Matrix<float> +{ + public: + Mat4(); + Mat4(const Matrix<float> generalMatrix); + Mat4(const std::vector<float> &elements); + Mat4(const float *elements); + Mat4(float m00, + float m01, + float m02, + float m03, + float m10, + float m11, + float m12, + float m13, + float m20, + float m21, + float m22, + float m23, + float m30, + float m31, + float m32, + float m33); + + static Mat4 Rotate(float angle, const Vector3 &axis); + static Mat4 Translate(const Vector3 &t); + static Mat4 Scale(const Vector3 &s); + static Mat4 Frustum(float l, float r, float b, float t, float n, float f); + static Mat4 Perspective(float fov, float aspectRatio, float n, float f); + static Mat4 Ortho(float l, float r, float b, float t, float n, float f); + + Mat4 product(const Mat4 &m); + Vector4 product(const Vector4 &b); + void dump(); +}; + +} // namespace angle + +#endif // COMMON_MATRIX_UTILS_H_ diff --git a/gfx/angle/checkout/src/common/platform.h b/gfx/angle/checkout/src/common/platform.h new file mode 100644 index 0000000000..acb21575ac --- /dev/null +++ b/gfx/angle/checkout/src/common/platform.h @@ -0,0 +1,111 @@ +// +// Copyright (c) 2014 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// platform.h: Operating system specific includes and defines. + +#ifndef COMMON_PLATFORM_H_ +#define COMMON_PLATFORM_H_ + +#if defined(_WIN32) +# define ANGLE_PLATFORM_WINDOWS 1 +#elif defined(__Fuchsia__) +# define ANGLE_PLATFORM_FUCHSIA 1 +# define ANGLE_PLATFORM_POSIX 1 +#elif defined(__APPLE__) +# define ANGLE_PLATFORM_APPLE 1 +# define ANGLE_PLATFORM_POSIX 1 +#elif defined(ANDROID) +# define ANGLE_PLATFORM_ANDROID 1 +# define ANGLE_PLATFORM_POSIX 1 +#elif defined(__linux__) || defined(EMSCRIPTEN) +# define ANGLE_PLATFORM_LINUX 1 +# define ANGLE_PLATFORM_POSIX 1 +#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || \ + defined(__DragonFly__) || defined(__sun) || defined(__GLIBC__) || defined(__GNU__) || \ + defined(__QNX__) || defined(__Fuchsia__) || defined(__HAIKU__) +# define ANGLE_PLATFORM_POSIX 1 +#else +# error Unsupported platform. 
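As a hedged sketch of how the ANGLE_PLATFORM_* macros defined above are typically consumed by client code (PlatformName() is a hypothetical helper, not part of ANGLE):

    #include "common/platform.h"

    const char *PlatformName()
    {
    #if defined(ANGLE_PLATFORM_WINDOWS)
        return "Windows";
    #elif defined(ANGLE_PLATFORM_APPLE)
        return "Apple";
    #elif defined(ANGLE_PLATFORM_ANDROID)
        return "Android";
    #elif defined(ANGLE_PLATFORM_LINUX)
        return "Linux";
    #elif defined(ANGLE_PLATFORM_POSIX)
        return "POSIX (other)";
    #else
        return "Unknown";
    #endif
    }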
+#endif + +#ifdef ANGLE_PLATFORM_WINDOWS +# ifndef STRICT +# define STRICT 1 +# endif +# ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN 1 +# endif +# ifndef NOMINMAX +# define NOMINMAX 1 +# endif + +# include <intrin.h> +# include <windows.h> + +# if defined(WINAPI_FAMILY) && (WINAPI_FAMILY != WINAPI_FAMILY_DESKTOP_APP) +# define ANGLE_ENABLE_WINDOWS_STORE 1 +# endif + +# if defined(ANGLE_ENABLE_D3D9) +# include <d3d9.h> +# include <d3dcompiler.h> +# endif + +// Include D3D11 headers when OpenGL is enabled on Windows for interop extensions. +# if defined(ANGLE_ENABLE_D3D11) || defined(ANGLE_ENABLE_OPENGL) +# include <d3d10_1.h> +# include <d3d11.h> +# include <d3d11_3.h> +# include <d3dcompiler.h> +# include <dxgi.h> +# include <dxgi1_2.h> +# endif + +# if defined(ANGLE_ENABLE_D3D9) || defined(ANGLE_ENABLE_D3D11) +# include <wrl.h> +# endif + +# if defined(ANGLE_ENABLE_WINDOWS_STORE) +# include <dxgi1_3.h> +# if defined(_DEBUG) +# include <DXProgrammableCapture.h> +# include <dxgidebug.h> +# endif +# endif + +# undef near +# undef far +#endif + +#if defined(_MSC_VER) && !defined(_M_ARM) && !defined(_M_ARM64) +# include <intrin.h> +# define ANGLE_USE_SSE +#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) +# include <x86intrin.h> +# define ANGLE_USE_SSE +#endif + +// Mips and arm devices need to include stddef for size_t. +#if defined(__mips__) || defined(__arm__) || defined(__aarch64__) +# include <stddef.h> +#endif + +// The MemoryBarrier function name collides with a macro under Windows +// We will undef the macro so that the function name does not get replaced +#undef MemoryBarrier + +// Macro for hinting that an expression is likely to be true/false. +#if !defined(ANGLE_LIKELY) || !defined(ANGLE_UNLIKELY) +# if defined(__GNUC__) || defined(__clang__) +# define ANGLE_LIKELY(x) __builtin_expect(!!(x), 1) +# define ANGLE_UNLIKELY(x) __builtin_expect(!!(x), 0) +# else +# define ANGLE_LIKELY(x) (x) +# define ANGLE_UNLIKELY(x) (x) +# endif // defined(__GNUC__) || defined(__clang__) +#endif // !defined(ANGLE_LIKELY) || !defined(ANGLE_UNLIKELY) + +#endif // COMMON_PLATFORM_H_ diff --git a/gfx/angle/checkout/src/common/string_utils.cpp b/gfx/angle/checkout/src/common/string_utils.cpp new file mode 100644 index 0000000000..e6836bac6e --- /dev/null +++ b/gfx/angle/checkout/src/common/string_utils.cpp @@ -0,0 +1,212 @@ +// +// Copyright 2015 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// string_utils: +// String helper functions. 
+// + +#include "string_utils.h" + +#include <stdlib.h> +#include <string.h> +#include <algorithm> +#include <fstream> +#include <sstream> + +#include "common/platform.h" + +namespace angle +{ + +const char kWhitespaceASCII[] = " \f\n\r\t\v"; + +std::vector<std::string> SplitString(const std::string &input, + const std::string &delimiters, + WhitespaceHandling whitespace, + SplitResult resultType) +{ + std::vector<std::string> result; + if (input.empty()) + { + return result; + } + + std::string::size_type start = 0; + while (start != std::string::npos) + { + auto end = input.find_first_of(delimiters, start); + + std::string piece; + if (end == std::string::npos) + { + piece = input.substr(start); + start = std::string::npos; + } + else + { + piece = input.substr(start, end - start); + start = end + 1; + } + + if (whitespace == TRIM_WHITESPACE) + { + piece = TrimString(piece, kWhitespaceASCII); + } + + if (resultType == SPLIT_WANT_ALL || !piece.empty()) + { + result.push_back(std::move(piece)); + } + } + + return result; +} + +void SplitStringAlongWhitespace(const std::string &input, std::vector<std::string> *tokensOut) +{ + + std::istringstream stream(input); + std::string line; + + while (std::getline(stream, line)) + { + size_t prev = 0, pos; + while ((pos = line.find_first_of(kWhitespaceASCII, prev)) != std::string::npos) + { + if (pos > prev) + tokensOut->push_back(line.substr(prev, pos - prev)); + prev = pos + 1; + } + if (prev < line.length()) + tokensOut->push_back(line.substr(prev, std::string::npos)); + } +} + +std::string TrimString(const std::string &input, const std::string &trimChars) +{ + auto begin = input.find_first_not_of(trimChars); + if (begin == std::string::npos) + { + return ""; + } + + std::string::size_type end = input.find_last_not_of(trimChars); + if (end == std::string::npos) + { + return input.substr(begin); + } + + return input.substr(begin, end - begin + 1); +} + +std::string GetPrefix(const std::string &input, size_t offset, const char *delimiter) +{ + size_t match = input.find(delimiter, offset); + if (match == std::string::npos) + { + return input.substr(offset); + } + return input.substr(offset, match - offset); +} + +std::string GetPrefix(const std::string &input, size_t offset, char delimiter) +{ + size_t match = input.find(delimiter, offset); + if (match == std::string::npos) + { + return input.substr(offset); + } + return input.substr(offset, match - offset); +} + +bool HexStringToUInt(const std::string &input, unsigned int *uintOut) +{ + unsigned int offset = 0; + + if (input.size() >= 2 && input[0] == '0' && input[1] == 'x') + { + offset = 2u; + } + + // Simple validity check + if (input.find_first_not_of("0123456789ABCDEFabcdef", offset) != std::string::npos) + { + return false; + } + + std::stringstream inStream(input); + inStream >> std::hex >> *uintOut; + return !inStream.fail(); +} + +bool ReadFileToString(const std::string &path, std::string *stringOut) +{ + std::ifstream inFile(path.c_str()); + if (inFile.fail()) + { + return false; + } + + inFile.seekg(0, std::ios::end); + stringOut->reserve(static_cast<std::string::size_type>(inFile.tellg())); + inFile.seekg(0, std::ios::beg); + + stringOut->assign(std::istreambuf_iterator<char>(inFile), std::istreambuf_iterator<char>()); + return !inFile.fail(); +} + +bool BeginsWith(const std::string &str, const std::string &prefix) +{ + return strncmp(str.c_str(), prefix.c_str(), prefix.length()) == 0; +} + +bool BeginsWith(const std::string &str, const char *prefix) +{ + return strncmp(str.c_str(), 
prefix, strlen(prefix)) == 0; +} + +bool BeginsWith(const char *str, const char *prefix) +{ + return strncmp(str, prefix, strlen(prefix)) == 0; +} + +bool BeginsWith(const std::string &str, const std::string &prefix, const size_t prefixLength) +{ + return strncmp(str.c_str(), prefix.c_str(), prefixLength) == 0; +} + +bool EndsWith(const std::string &str, const char *suffix) +{ + const auto len = strlen(suffix); + if (len > str.size()) + return false; + + const char *end = str.c_str() + str.size() - len; + + return memcmp(end, suffix, len) == 0; +} + +void ToLower(std::string *str) +{ + for (auto &ch : *str) + { + ch = static_cast<char>(::tolower(ch)); + } +} + +bool ReplaceSubstring(std::string *str, + const std::string &substring, + const std::string &replacement) +{ + size_t replacePos = str->find(substring); + if (replacePos == std::string::npos) + { + return false; + } + str->replace(replacePos, substring.size(), replacement); + return true; +} + +} // namespace angle diff --git a/gfx/angle/checkout/src/common/string_utils.h b/gfx/angle/checkout/src/common/string_utils.h new file mode 100644 index 0000000000..543ba38601 --- /dev/null +++ b/gfx/angle/checkout/src/common/string_utils.h @@ -0,0 +1,86 @@ +// +// Copyright 2015 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// string_utils: +// String helper functions. +// + +#ifndef LIBANGLE_STRING_UTILS_H_ +#define LIBANGLE_STRING_UTILS_H_ + +#include <string> +#include <vector> + +#include "common/Optional.h" + +namespace angle +{ + +extern const char kWhitespaceASCII[]; + +enum WhitespaceHandling +{ + KEEP_WHITESPACE, + TRIM_WHITESPACE, +}; + +enum SplitResult +{ + SPLIT_WANT_ALL, + SPLIT_WANT_NONEMPTY, +}; + +std::vector<std::string> SplitString(const std::string &input, + const std::string &delimiters, + WhitespaceHandling whitespace, + SplitResult resultType); + +void SplitStringAlongWhitespace(const std::string &input, std::vector<std::string> *tokensOut); + +std::string TrimString(const std::string &input, const std::string &trimChars); + +// Return the substring starting at offset and up to the first occurance of the |delimeter|. +std::string GetPrefix(const std::string &input, size_t offset, const char *delimiter); +std::string GetPrefix(const std::string &input, size_t offset, char delimiter); + +bool HexStringToUInt(const std::string &input, unsigned int *uintOut); + +bool ReadFileToString(const std::string &path, std::string *stringOut); + +// Check if the string str begins with the given prefix. +// The comparison is case sensitive. +bool BeginsWith(const std::string &str, const std::string &prefix); + +// Check if the string str begins with the given prefix. +// Prefix may not be NULL and needs to be NULL terminated. +// The comparison is case sensitive. +bool BeginsWith(const std::string &str, const char *prefix); + +// Check if the string str begins with the given prefix. +// str and prefix may not be NULL and need to be NULL terminated. +// The comparison is case sensitive. +bool BeginsWith(const char *str, const char *prefix); + +// Check if the string str begins with the first prefixLength characters of the given prefix. +// The length of the prefix string should be greater than or equal to prefixLength. +// The comparison is case sensitive. +bool BeginsWith(const std::string &str, const std::string &prefix, const size_t prefixLength); + +// Check if the string str ends with the given suffix. 
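A brief usage sketch of the string helpers declared here (illustrative inputs only; assumes common/string_utils.h is included):

    #include <string>
    #include <vector>
    #include "common/string_utils.h"

    void StringUtilsSketch()
    {
        // Split on commas, trim ASCII whitespace from each piece, and drop
        // empty pieces: yields {"alpha", "beta", "gamma"}.
        std::vector<std::string> pieces = angle::SplitString(
            "alpha, beta, ,gamma", ",", angle::TRIM_WHITESPACE, angle::SPLIT_WANT_NONEMPTY);

        // Hex parsing accepts an optional "0x" prefix.
        unsigned int value = 0;
        bool ok = angle::HexStringToUInt("0x1A", &value);  // value == 26
        (void)pieces;
        (void)ok;
    }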
+// Suffix may not be NUL and needs to be NULL terminated. +// The comparison is case sensitive. +bool EndsWith(const std::string &str, const char *suffix); + +// Convert to lower-case. +void ToLower(std::string *str); + +// Replaces the substring 'substring' in 'str' with 'replacement'. Returns true if successful. +bool ReplaceSubstring(std::string *str, + const std::string &substring, + const std::string &replacement); + +} // namespace angle + +#endif // LIBANGLE_STRING_UTILS_H_ diff --git a/gfx/angle/checkout/src/common/system_utils.cpp b/gfx/angle/checkout/src/common/system_utils.cpp new file mode 100644 index 0000000000..257fc42c2e --- /dev/null +++ b/gfx/angle/checkout/src/common/system_utils.cpp @@ -0,0 +1,31 @@ +// +// Copyright 2018 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// system_utils.cpp: Implementation of common functions + +#include "system_utils.h" + +namespace angle +{ +bool PrependPathToEnvironmentVar(const char *variableName, const char *path) +{ + std::string oldValue = GetEnvironmentVar(variableName); + const char *newValue = nullptr; + std::string buf; + if (oldValue.empty()) + { + newValue = path; + } + else + { + buf = path; + buf += GetPathSeparator(); + buf += oldValue; + newValue = buf.c_str(); + } + return SetEnvironmentVar(variableName, newValue); +} +} // namespace angle diff --git a/gfx/angle/checkout/src/common/system_utils.h b/gfx/angle/checkout/src/common/system_utils.h new file mode 100644 index 0000000000..0ea4959d0b --- /dev/null +++ b/gfx/angle/checkout/src/common/system_utils.h @@ -0,0 +1,71 @@ +// +// Copyright (c) 2014 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// system_utils.h: declaration of OS-specific utility functions + +#ifndef COMMON_SYSTEM_UTILS_H_ +#define COMMON_SYSTEM_UTILS_H_ + +#include "common/Optional.h" +#include "common/angleutils.h" + +namespace angle +{ +std::string GetExecutablePath(); +std::string GetExecutableDirectory(); +const char *GetSharedLibraryExtension(); +Optional<std::string> GetCWD(); +bool SetCWD(const char *dirName); +bool SetEnvironmentVar(const char *variableName, const char *value); +bool UnsetEnvironmentVar(const char *variableName); +std::string GetEnvironmentVar(const char *variableName); +const char *GetPathSeparator(); +bool PrependPathToEnvironmentVar(const char *variableName, const char *path); +bool IsDirectory(const char *filename); + +// Run an application and get the output. Gets a nullptr-terminated set of args to execute the +// application with, and returns the stdout and stderr outputs as well as the exit code. +// +// Pass nullptr for stdoutOut/stderrOut if you don't need to capture. exitCodeOut is required. +// +// Returns false if it fails to actually execute the application. +bool RunApp(const std::vector<const char *> &args, + std::string *stdoutOut, + std::string *stderrOut, + int *exitCodeOut); + +class Library : angle::NonCopyable +{ + public: + virtual ~Library() {} + virtual void *getSymbol(const char *symbolName) = 0; + virtual void *getNative() const = 0; + + template <typename FuncT> + void getAs(const char *symbolName, FuncT *funcOut) + { + *funcOut = reinterpret_cast<FuncT>(getSymbol(symbolName)); + } +}; + +// Use SYSTEM_DIR to bypass loading ANGLE libraries with the same name as system DLLS +// (e.g. 
opengl32.dll) +enum class SearchType +{ + ApplicationDir, + SystemDir +}; + +Library *OpenSharedLibrary(const char *libraryName, SearchType searchType); + +// Returns true if the process is currently being debugged. +bool IsDebuggerAttached(); + +// Calls system APIs to break into the debugger. +void BreakDebugger(); +} // namespace angle + +#endif // COMMON_SYSTEM_UTILS_H_ diff --git a/gfx/angle/checkout/src/common/system_utils_linux.cpp b/gfx/angle/checkout/src/common/system_utils_linux.cpp new file mode 100644 index 0000000000..f7ea49fc4d --- /dev/null +++ b/gfx/angle/checkout/src/common/system_utils_linux.cpp @@ -0,0 +1,47 @@ +// +// Copyright (c) 2015 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// system_utils_linux.cpp: Implementation of OS-specific functions for Linux + +#include "system_utils.h" + +#include <sys/stat.h> +#include <sys/time.h> +#include <sys/types.h> +#include <unistd.h> + +#include <array> + +namespace angle +{ +std::string GetExecutablePath() +{ + // We cannot use lstat to get the size of /proc/self/exe as it always returns 0 + // so we just use a big buffer and hope the path fits in it. + char path[4096]; + + ssize_t result = readlink("/proc/self/exe", path, sizeof(path) - 1); + if (result < 0 || static_cast<size_t>(result) >= sizeof(path) - 1) + { + return ""; + } + + path[result] = '\0'; + return path; +} + +std::string GetExecutableDirectory() +{ + std::string executablePath = GetExecutablePath(); + size_t lastPathSepLoc = executablePath.find_last_of("/"); + return (lastPathSepLoc != std::string::npos) ? executablePath.substr(0, lastPathSepLoc) : ""; +} + +const char *GetSharedLibraryExtension() +{ + return "so"; +} +} // namespace angle diff --git a/gfx/angle/checkout/src/common/system_utils_mac.cpp b/gfx/angle/checkout/src/common/system_utils_mac.cpp new file mode 100644 index 0000000000..f3d9891ebb --- /dev/null +++ b/gfx/angle/checkout/src/common/system_utils_mac.cpp @@ -0,0 +1,52 @@ +// +// Copyright (c) 2015 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// system_utils_osx.cpp: Implementation of OS-specific functions for OSX + +#include "system_utils.h" + +#include <unistd.h> + +#include <mach-o/dyld.h> +#include <cstdlib> +#include <vector> + +#include <array> + +namespace angle +{ +std::string GetExecutablePath() +{ + std::string result; + + uint32_t size = 0; + _NSGetExecutablePath(nullptr, &size); + + std::vector<char> buffer; + buffer.resize(size + 1); + + _NSGetExecutablePath(buffer.data(), &size); + buffer[size] = '\0'; + + if (!strrchr(buffer.data(), '/')) + { + return ""; + } + return buffer.data(); +} + +std::string GetExecutableDirectory() +{ + std::string executablePath = GetExecutablePath(); + size_t lastPathSepLoc = executablePath.find_last_of("/"); + return (lastPathSepLoc != std::string::npos) ? executablePath.substr(0, lastPathSepLoc) : ""; +} + +const char *GetSharedLibraryExtension() +{ + return "dylib"; +} +} // namespace angle diff --git a/gfx/angle/checkout/src/common/system_utils_posix.cpp b/gfx/angle/checkout/src/common/system_utils_posix.cpp new file mode 100644 index 0000000000..26501eb2a6 --- /dev/null +++ b/gfx/angle/checkout/src/common/system_utils_posix.cpp @@ -0,0 +1,276 @@ +// +// Copyright 2018 The ANGLE Project Authors. All rights reserved. 
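A short, hypothetical sketch of the dynamic-library API declared in system_utils.h above; the library and symbol names are placeholders, not anything ANGLE actually loads:

    #include "common/system_utils.h"

    using ExampleFn = int (*)(int);

    void LibrarySketch()
    {
        // OpenSharedLibrary appends the platform extension (.dll/.so/.dylib),
        // so the base name is passed without one.
        angle::Library *lib =
            angle::OpenSharedLibrary("examplelib", angle::SearchType::ApplicationDir);

        ExampleFn exampleEntryPoint = nullptr;
        if (lib)
        {
            // getAs wraps getSymbol plus the cast to the requested pointer type.
            lib->getAs("example_entry_point", &exampleEntryPoint);
        }
        delete lib;
    }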
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// system_utils_posix.cpp: Implementation of POSIX OS-specific functions. + +#include "system_utils.h" + +#include <array> + +#include <dlfcn.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +namespace angle +{ + +namespace +{ +struct ScopedPipe +{ + ~ScopedPipe() + { + closeEndPoint(0); + closeEndPoint(1); + } + void closeEndPoint(int index) + { + if (fds[index] >= 0) + { + close(fds[index]); + fds[index] = -1; + } + } + int fds[2] = { + -1, + -1, + }; +}; + +void ReadEntireFile(int fd, std::string *out) +{ + out->clear(); + + while (true) + { + char buffer[256]; + ssize_t bytesRead = read(fd, buffer, sizeof(buffer)); + + // If interrupted, retry. + if (bytesRead < 0 && errno == EINTR) + { + continue; + } + + // If failed, or nothing to read, we are done. + if (bytesRead <= 0) + { + break; + } + + out->append(buffer, bytesRead); + } +} +} // anonymous namespace + +Optional<std::string> GetCWD() +{ + std::array<char, 4096> pathBuf; + char *result = getcwd(pathBuf.data(), pathBuf.size()); + if (result == nullptr) + { + return Optional<std::string>::Invalid(); + } + return std::string(pathBuf.data()); +} + +bool SetCWD(const char *dirName) +{ + return (chdir(dirName) == 0); +} + +bool UnsetEnvironmentVar(const char *variableName) +{ + return (unsetenv(variableName) == 0); +} + +bool SetEnvironmentVar(const char *variableName, const char *value) +{ + return (setenv(variableName, value, 1) == 0); +} + +std::string GetEnvironmentVar(const char *variableName) +{ + const char *value = getenv(variableName); + return (value == nullptr ? std::string() : std::string(value)); +} + +const char *GetPathSeparator() +{ + return ":"; +} + +bool RunApp(const std::vector<const char *> &args, + std::string *stdoutOut, + std::string *stderrOut, + int *exitCodeOut) +{ + if (args.size() == 0 || args.back() != nullptr) + { + return false; + } + + ScopedPipe stdoutPipe; + ScopedPipe stderrPipe; + + // Create pipes for stdout and stderr. + if (stdoutOut && pipe(stdoutPipe.fds) != 0) + { + return false; + } + if (stderrOut && pipe(stderrPipe.fds) != 0) + { + return false; + } + + pid_t pid = fork(); + if (pid < 0) + { + return false; + } + + if (pid == 0) + { + // Child. Execute the application. + + // Redirect stdout and stderr to the pipe fds. + if (stdoutOut) + { + if (dup2(stdoutPipe.fds[1], STDOUT_FILENO) < 0) + { + _exit(errno); + } + } + if (stderrOut) + { + if (dup2(stderrPipe.fds[1], STDERR_FILENO) < 0) + { + _exit(errno); + } + } + + // Execute the application, which doesn't return unless failed. Note: execv takes argv as + // `char * const *` for historical reasons. It is safe to const_cast it: + // + // http://pubs.opengroup.org/onlinepubs/9699919799/functions/exec.html + // + // > The statement about argv[] and envp[] being constants is included to make explicit to + // future writers of language bindings that these objects are completely constant. Due to a + // limitation of the ISO C standard, it is not possible to state that idea in standard C. + // Specifying two levels of const- qualification for the argv[] and envp[] parameters for + // the exec functions may seem to be the natural choice, given that these functions do not + // modify either the array of pointers or the characters to which the function points, but + // this would disallow existing correct code. Instead, only the array of pointers is noted + // as constant. 
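Given the contract documented in system_utils.h (a nullptr-terminated argument vector, optional capture of stdout/stderr), a minimal caller of this POSIX implementation might look like the following sketch (the command and expected output are illustrative):

    #include <string>
    #include <vector>
    #include "common/system_utils.h"

    bool RunEchoSketch()
    {
        // The argument list must end with a nullptr sentinel, and args[0] is
        // the executable path handed to execv.
        std::vector<const char *> args = {"/bin/echo", "hello", nullptr};

        std::string stdoutText;
        std::string stderrText;
        int exitCode = 0;
        if (!angle::RunApp(args, &stdoutText, &stderrText, &exitCode))
        {
            return false;  // the fork/exec plumbing itself failed
        }
        return exitCode == 0 && stdoutText == "hello\n";
    }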
+ execv(args[0], const_cast<char *const *>(args.data())); + _exit(errno); + } + + // Parent. Read child output from the pipes and clean it up. + + // Close the write end of the pipes, so EOF can be generated when child exits. + stdoutPipe.closeEndPoint(1); + stderrPipe.closeEndPoint(1); + + // Read back the output of the child. + if (stdoutOut) + { + ReadEntireFile(stdoutPipe.fds[0], stdoutOut); + } + if (stderrOut) + { + ReadEntireFile(stderrPipe.fds[0], stderrOut); + } + + // Cleanup the child. + int status = 0; + do + { + pid_t changedPid = waitpid(pid, &status, 0); + if (changedPid < 0 && errno == EINTR) + { + continue; + } + if (changedPid < 0) + { + return false; + } + } while (!WIFEXITED(status) && !WIFSIGNALED(status)); + + // Retrieve the error code. + if (exitCodeOut) + { + *exitCodeOut = WEXITSTATUS(status); + } + + return true; +} + +class PosixLibrary : public Library +{ + public: + PosixLibrary(const char *libraryName) + { + char buffer[1000]; + int ret = snprintf(buffer, 1000, "%s.%s", libraryName, GetSharedLibraryExtension()); + if (ret > 0 && ret < 1000) + { + mModule = dlopen(buffer, RTLD_NOW); + } + } + + ~PosixLibrary() override + { + if (mModule) + { + dlclose(mModule); + } + } + + void *getSymbol(const char *symbolName) override + { + if (!mModule) + { + return nullptr; + } + + return dlsym(mModule, symbolName); + } + + void *getNative() const override { return mModule; } + + private: + void *mModule = nullptr; +}; + +Library *OpenSharedLibrary(const char *libraryName, SearchType searchType) +{ + return new PosixLibrary(libraryName); +} + +bool IsDirectory(const char *filename) +{ + struct stat st; + int result = stat(filename, &st); + return result == 0 && ((st.st_mode & S_IFDIR) == S_IFDIR); +} + +bool IsDebuggerAttached() +{ + // This could have a fuller implementation. + // See https://cs.chromium.org/chromium/src/base/debug/debugger_posix.cc + return false; +} + +void BreakDebugger() +{ + // This could have a fuller implementation. + // See https://cs.chromium.org/chromium/src/base/debug/debugger_posix.cc + abort(); +} +} // namespace angle diff --git a/gfx/angle/checkout/src/common/system_utils_win.cpp b/gfx/angle/checkout/src/common/system_utils_win.cpp new file mode 100644 index 0000000000..796e928192 --- /dev/null +++ b/gfx/angle/checkout/src/common/system_utils_win.cpp @@ -0,0 +1,318 @@ +// +// Copyright (c) 2014 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+// + +// system_utils_win.cpp: Implementation of OS-specific functions for Windows + +#include "system_utils.h" + +#include <stdarg.h> +#include <windows.h> +#include <array> +#include <vector> + +namespace angle +{ +namespace +{ +struct ScopedPipe +{ + ~ScopedPipe() + { + closeReadHandle(); + closeWriteHandle(); + } + void closeReadHandle() + { + if (readHandle) + { + CloseHandle(readHandle); + readHandle = nullptr; + } + } + void closeWriteHandle() + { + if (writeHandle) + { + CloseHandle(writeHandle); + writeHandle = nullptr; + } + } + HANDLE readHandle = nullptr; + HANDLE writeHandle = nullptr; +}; + +void ReadEntireFile(HANDLE handle, std::string *out) +{ + out->clear(); + + while (true) + { + char buffer[256]; + DWORD bytesRead; + + BOOL success = ReadFile(handle, buffer, sizeof(buffer), &bytesRead, nullptr); + + if (!success || bytesRead == 0) + { + break; + } + + out->append(buffer, bytesRead); + } +} +} // anonymous namespace + +std::string GetExecutablePath() +{ + std::array<char, MAX_PATH> executableFileBuf; + DWORD executablePathLen = GetModuleFileNameA(nullptr, executableFileBuf.data(), + static_cast<DWORD>(executableFileBuf.size())); + return (executablePathLen > 0 ? std::string(executableFileBuf.data()) : ""); +} + +std::string GetExecutableDirectory() +{ + std::string executablePath = GetExecutablePath(); + size_t lastPathSepLoc = executablePath.find_last_of("\\/"); + return (lastPathSepLoc != std::string::npos) ? executablePath.substr(0, lastPathSepLoc) : ""; +} + +const char *GetSharedLibraryExtension() +{ + return "dll"; +} + +Optional<std::string> GetCWD() +{ + std::array<char, MAX_PATH> pathBuf; + DWORD result = GetCurrentDirectoryA(static_cast<DWORD>(pathBuf.size()), pathBuf.data()); + if (result == 0) + { + return Optional<std::string>::Invalid(); + } + return std::string(pathBuf.data()); +} + +bool SetCWD(const char *dirName) +{ + return (SetCurrentDirectoryA(dirName) == TRUE); +} + +bool UnsetEnvironmentVar(const char *variableName) +{ + return (SetEnvironmentVariableA(variableName, nullptr) == TRUE); +} + +bool SetEnvironmentVar(const char *variableName, const char *value) +{ + return (SetEnvironmentVariableA(variableName, value) == TRUE); +} + +std::string GetEnvironmentVar(const char *variableName) +{ + std::array<char, MAX_PATH> oldValue; + DWORD result = + GetEnvironmentVariableA(variableName, oldValue.data(), static_cast<DWORD>(oldValue.size())); + if (result == 0) + { + return std::string(); + } + else + { + return std::string(oldValue.data()); + } +} + +const char *GetPathSeparator() +{ + return ";"; +} + +bool RunApp(const std::vector<const char *> &args, + std::string *stdoutOut, + std::string *stderrOut, + int *exitCodeOut) +{ + ScopedPipe stdoutPipe; + ScopedPipe stderrPipe; + + SECURITY_ATTRIBUTES sa_attr; + // Set the bInheritHandle flag so pipe handles are inherited. + sa_attr.nLength = sizeof(SECURITY_ATTRIBUTES); + sa_attr.bInheritHandle = TRUE; + sa_attr.lpSecurityDescriptor = nullptr; + + // Create pipes for stdout and stderr. Ensure the read handles to the pipes are not inherited. 
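For reference, a hedged sketch of the conventional Win32 pattern that comment describes: create an inheritable pipe, then clear the inherit flag on the read end the parent keeps (a standalone helper for illustration, not part of the patch):

    #include <windows.h>

    bool CreateChildOutputPipe(HANDLE *readOut, HANDLE *writeOut)
    {
        SECURITY_ATTRIBUTES sa = {};
        sa.nLength              = sizeof(SECURITY_ATTRIBUTES);
        sa.bInheritHandle       = TRUE;  // both ends start out inheritable
        sa.lpSecurityDescriptor = nullptr;

        if (!CreatePipe(readOut, writeOut, &sa, 0))
        {
            return false;
        }
        // The child only needs the write end, so stop the read end from
        // being inherited.
        return SetHandleInformation(*readOut, HANDLE_FLAG_INHERIT, 0) != FALSE;
    }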
+ if (stdoutOut && !CreatePipe(&stdoutPipe.readHandle, &stdoutPipe.writeHandle, &sa_attr, 0) && + !SetHandleInformation(stdoutPipe.readHandle, HANDLE_FLAG_INHERIT, 0)) + { + return false; + } + if (stderrOut && !CreatePipe(&stderrPipe.readHandle, &stderrPipe.writeHandle, &sa_attr, 0) && + !SetHandleInformation(stderrPipe.readHandle, HANDLE_FLAG_INHERIT, 0)) + { + return false; + } + + // Concat the nicely separated arguments into one string so the application has to reparse it. + // We don't support quotation and spaces in arguments currently. + std::vector<char> commandLineString; + for (const char *arg : args) + { + if (arg) + { + if (!commandLineString.empty()) + { + commandLineString.push_back(' '); + } + commandLineString.insert(commandLineString.end(), arg, arg + strlen(arg)); + } + } + commandLineString.push_back('\0'); + + STARTUPINFOA startInfo = {}; + + startInfo.cb = sizeof(STARTUPINFOA); + startInfo.hStdInput = GetStdHandle(STD_INPUT_HANDLE); + if (stdoutOut) + { + startInfo.hStdOutput = stdoutPipe.writeHandle; + } + else + { + startInfo.hStdError = GetStdHandle(STD_OUTPUT_HANDLE); + } + if (stderrOut) + { + startInfo.hStdError = stderrPipe.writeHandle; + } + else + { + startInfo.hStdError = GetStdHandle(STD_ERROR_HANDLE); + } + + if (stderrOut || stdoutOut) + { + startInfo.dwFlags |= STARTF_USESTDHANDLES; + } + + // Create the child process. + PROCESS_INFORMATION processInfo = {}; + if (!CreateProcessA(nullptr, commandLineString.data(), nullptr, nullptr, + TRUE, // Handles are inherited. + 0, nullptr, nullptr, &startInfo, &processInfo)) + { + return false; + } + + // Close the write end of the pipes, so EOF can be generated when child exits. + stdoutPipe.closeWriteHandle(); + stderrPipe.closeWriteHandle(); + + // Read back the output of the child. + if (stdoutOut) + { + ReadEntireFile(stdoutPipe.readHandle, stdoutOut); + } + if (stderrOut) + { + ReadEntireFile(stderrPipe.readHandle, stderrOut); + } + + // Cleanup the child. 
+ bool success = WaitForSingleObject(processInfo.hProcess, INFINITE) == WAIT_OBJECT_0; + + if (success) + { + DWORD exitCode = 0; + success = GetExitCodeProcess(processInfo.hProcess, &exitCode); + + if (success) + { + *exitCodeOut = static_cast<int>(exitCode); + } + } + + CloseHandle(processInfo.hProcess); + CloseHandle(processInfo.hThread); + + return success; +} + +class Win32Library : public Library +{ + public: + Win32Library(const char *libraryName, SearchType searchType) + { + char buffer[MAX_PATH]; + int ret = snprintf(buffer, MAX_PATH, "%s.%s", libraryName, GetSharedLibraryExtension()); + if (ret > 0 && ret < MAX_PATH) + { + switch (searchType) + { + case SearchType::ApplicationDir: + mModule = LoadLibraryA(buffer); + break; + case SearchType::SystemDir: + mModule = LoadLibraryExA(buffer, nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32); + break; + } + } + } + + ~Win32Library() override + { + if (mModule) + { + FreeLibrary(mModule); + } + } + + void *getSymbol(const char *symbolName) override + { + if (!mModule) + { + return nullptr; + } + + return reinterpret_cast<void *>(GetProcAddress(mModule, symbolName)); + } + + void *getNative() const override { return reinterpret_cast<void *>(mModule); } + + private: + HMODULE mModule = nullptr; +}; + +Library *OpenSharedLibrary(const char *libraryName, SearchType searchType) +{ + return new Win32Library(libraryName, searchType); +} + +bool IsDirectory(const char *filename) +{ + WIN32_FILE_ATTRIBUTE_DATA fileInformation; + + BOOL result = GetFileAttributesExA(filename, GetFileExInfoStandard, &fileInformation); + if (result) + { + DWORD attribs = fileInformation.dwFileAttributes; + return (attribs != INVALID_FILE_ATTRIBUTES) && ((attribs & FILE_ATTRIBUTE_DIRECTORY) > 0); + } + + return false; +} + +bool IsDebuggerAttached() +{ + return !!::IsDebuggerPresent(); +} + +void BreakDebugger() +{ + __debugbreak(); +} +} // namespace angle diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/base_export.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/base_export.h new file mode 100644 index 0000000000..1af5485336 --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/base_export.h @@ -0,0 +1,13 @@ +// +// Copyright 2017 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// base_export.h: Compatiblity hacks for importing Chromium's base/SHA1. + +#ifndef ANGLEBASE_BASE_EXPORT_H_ +#define ANGLEBASE_BASE_EXPORT_H_ + +#define ANGLEBASE_EXPORT + +#endif // ANGLEBASE_BASE_EXPORT_H_
\ No newline at end of file diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/containers/mru_cache.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/containers/mru_cache.h new file mode 100644 index 0000000000..fe4fec5768 --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/containers/mru_cache.h @@ -0,0 +1,275 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file contains a template for a Most Recently Used cache that allows +// constant-time access to items using a key, but easy identification of the +// least-recently-used items for removal. Each key can only be associated with +// one payload item at a time. +// +// The key object will be stored twice, so it should support efficient copying. +// +// NOTE: While all operations are O(1), this code is written for +// legibility rather than optimality. If future profiling identifies this as +// a bottleneck, there is room for smaller values of 1 in the O(1). :] + +#ifndef ANGLEBASE_CONTAINERS_MRU_CACHE_H_ +#define ANGLEBASE_CONTAINERS_MRU_CACHE_H_ + +#include <stddef.h> + +#include <algorithm> +#include <functional> +#include <list> +#include <map> +#include <unordered_map> +#include <utility> + +#include "anglebase/logging.h" +#include "anglebase/macros.h" + +namespace angle +{ + +namespace base +{ + +// MRUCacheBase ---------------------------------------------------------------- + +// This template is used to standardize map type containers that can be used +// by MRUCacheBase. This level of indirection is necessary because of the way +// that template template params and default template params interact. +template <class KeyType, class ValueType, class CompareType> +struct MRUCacheStandardMap +{ + typedef std::map<KeyType, ValueType, CompareType> Type; +}; + +// Base class for the MRU cache specializations defined below. +template <class KeyType, + class PayloadType, + class HashOrCompareType, + template <typename, typename, typename> class MapType = MRUCacheStandardMap> +class MRUCacheBase +{ + public: + // The payload of the list. This maintains a copy of the key so we can + // efficiently delete things given an element of the list. + typedef std::pair<KeyType, PayloadType> value_type; + + private: + typedef std::list<value_type> PayloadList; + typedef + typename MapType<KeyType, typename PayloadList::iterator, HashOrCompareType>::Type KeyIndex; + + public: + typedef typename PayloadList::size_type size_type; + + typedef typename PayloadList::iterator iterator; + typedef typename PayloadList::const_iterator const_iterator; + typedef typename PayloadList::reverse_iterator reverse_iterator; + typedef typename PayloadList::const_reverse_iterator const_reverse_iterator; + + enum + { + NO_AUTO_EVICT = 0 + }; + + // The max_size is the size at which the cache will prune its members to when + // a new item is inserted. If the caller wants to manager this itself (for + // example, maybe it has special work to do when something is evicted), it + // can pass NO_AUTO_EVICT to not restrict the cache size. + explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {} + + virtual ~MRUCacheBase() {} + + size_type max_size() const { return max_size_; } + + // Inserts a payload item with the given key. If an existing item has + // the same key, it is removed prior to insertion. 
An iterator indicating the + // inserted item will be returned (this will always be the front of the list). + // + // The payload will be forwarded. + template <typename Payload> + iterator Put(const KeyType &key, Payload &&payload) + { + // Remove any existing payload with that key. + typename KeyIndex::iterator index_iter = index_.find(key); + if (index_iter != index_.end()) + { + // Erase the reference to it. The index reference will be replaced in the + // code below. + Erase(index_iter->second); + } + else if (max_size_ != NO_AUTO_EVICT) + { + // New item is being inserted which might make it larger than the maximum + // size: kick the oldest thing out if necessary. + ShrinkToSize(max_size_ - 1); + } + + ordering_.emplace_front(key, std::forward<Payload>(payload)); + index_.emplace(key, ordering_.begin()); + return ordering_.begin(); + } + + // Retrieves the contents of the given key, or end() if not found. This method + // has the side effect of moving the requested item to the front of the + // recency list. + iterator Get(const KeyType &key) + { + typename KeyIndex::iterator index_iter = index_.find(key); + if (index_iter == index_.end()) + return end(); + typename PayloadList::iterator iter = index_iter->second; + + // Move the touched item to the front of the recency ordering. + ordering_.splice(ordering_.begin(), ordering_, iter); + return ordering_.begin(); + } + + // Retrieves the payload associated with a given key and returns it via + // result without affecting the ordering (unlike Get). + iterator Peek(const KeyType &key) + { + typename KeyIndex::const_iterator index_iter = index_.find(key); + if (index_iter == index_.end()) + return end(); + return index_iter->second; + } + + const_iterator Peek(const KeyType &key) const + { + typename KeyIndex::const_iterator index_iter = index_.find(key); + if (index_iter == index_.end()) + return end(); + return index_iter->second; + } + + // Exchanges the contents of |this| by the contents of the |other|. + void Swap(MRUCacheBase &other) + { + ordering_.swap(other.ordering_); + index_.swap(other.index_); + std::swap(max_size_, other.max_size_); + } + + // Erases the item referenced by the given iterator. An iterator to the item + // following it will be returned. The iterator must be valid. + iterator Erase(iterator pos) + { + index_.erase(pos->first); + return ordering_.erase(pos); + } + + // MRUCache entries are often processed in reverse order, so we add this + // convenience function (not typically defined by STL containers). + reverse_iterator Erase(reverse_iterator pos) + { + // We have to actually give it the incremented iterator to delete, since + // the forward iterator that base() returns is actually one past the item + // being iterated over. + return reverse_iterator(Erase((++pos).base())); + } + + // Shrinks the cache so it only holds |new_size| items. If |new_size| is + // bigger or equal to the current number of items, this will do nothing. + void ShrinkToSize(size_type new_size) + { + for (size_type i = size(); i > new_size; i--) + Erase(rbegin()); + } + + // Deletes everything from the cache. + void Clear() + { + index_.clear(); + ordering_.clear(); + } + + // Returns the number of elements in the cache. + size_type size() const + { + // We don't use ordering_.size() for the return value because + // (as a linked list) it can be O(n). + DCHECK(index_.size() == ordering_.size()); + return index_.size(); + } + + // Allows iteration over the list. 
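A small usage sketch of the cache API above (illustrative keys and values; assumes the anglebase include path used elsewhere in this patch):

    #include <string>
    #include "anglebase/containers/mru_cache.h"

    void MRUCacheSketch()
    {
        // A cache that holds at most two entries; inserting a third evicts
        // the least-recently-used one.
        angle::base::MRUCache<int, std::string> cache(2);
        cache.Put(1, std::string("one"));
        cache.Put(2, std::string("two"));

        // Get() also moves key 1 to the front of the recency list.
        bool found = (cache.Get(1) != cache.end());

        // Key 2 is now the oldest entry, so this insertion evicts it.
        cache.Put(3, std::string("three"));
        bool evicted = (cache.Peek(2) == cache.end());
        (void)found;
        (void)evicted;
    }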
Forward iteration starts with the most + // recent item and works backwards. + // + // Note that since these iterators are actually iterators over a list, you + // can keep them as you insert or delete things (as long as you don't delete + // the one you are pointing to) and they will still be valid. + iterator begin() { return ordering_.begin(); } + const_iterator begin() const { return ordering_.begin(); } + iterator end() { return ordering_.end(); } + const_iterator end() const { return ordering_.end(); } + + reverse_iterator rbegin() { return ordering_.rbegin(); } + const_reverse_iterator rbegin() const { return ordering_.rbegin(); } + reverse_iterator rend() { return ordering_.rend(); } + const_reverse_iterator rend() const { return ordering_.rend(); } + + bool empty() const { return ordering_.empty(); } + + private: + PayloadList ordering_; + KeyIndex index_; + + size_type max_size_; + + DISALLOW_COPY_AND_ASSIGN(MRUCacheBase); +}; + +// MRUCache -------------------------------------------------------------------- + +// A container that does not do anything to free its data. Use this when storing +// value types (as opposed to pointers) in the list. +template <class KeyType, class PayloadType, class CompareType = std::less<KeyType>> +class MRUCache : public MRUCacheBase<KeyType, PayloadType, CompareType> +{ + private: + using ParentType = MRUCacheBase<KeyType, PayloadType, CompareType>; + + public: + // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT. + explicit MRUCache(typename ParentType::size_type max_size) : ParentType(max_size) {} + virtual ~MRUCache() {} + + private: + DISALLOW_COPY_AND_ASSIGN(MRUCache); +}; + +// HashingMRUCache ------------------------------------------------------------ + +template <class KeyType, class ValueType, class HashType> +struct MRUCacheHashMap +{ + typedef std::unordered_map<KeyType, ValueType, HashType> Type; +}; + +// This class is similar to MRUCache, except that it uses std::unordered_map as +// the map type instead of std::map. Note that your KeyType must be hashable to +// use this cache or you need to provide a hashing class. +template <class KeyType, class PayloadType, class HashType = std::hash<KeyType>> +class HashingMRUCache : public MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap> +{ + private: + using ParentType = MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap>; + + public: + // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT. + explicit HashingMRUCache(typename ParentType::size_type max_size) : ParentType(max_size) {} + virtual ~HashingMRUCache() {} + + private: + DISALLOW_COPY_AND_ASSIGN(HashingMRUCache); +}; + +} // namespace base + +} // namespace angle + +#endif // ANGLEBASE_CONTAINERS_MRU_CACHE_H_ diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/logging.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/logging.h new file mode 100644 index 0000000000..73f81e87f2 --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/logging.h @@ -0,0 +1,26 @@ +// +// Copyright 2016 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// logging.h: Compatiblity hacks for importing Chromium's base/numerics. 
+ +#ifndef ANGLEBASE_LOGGING_H_ +#define ANGLEBASE_LOGGING_H_ + +#include "common/debug.h" + +#ifndef DCHECK +# define DCHECK(X) ASSERT(X) +#endif + +#ifndef CHECK +# define CHECK(X) ASSERT(X) +#endif + +// Unfortunately ANGLE relies on ASSERT being an empty statement, which these libs don't respect. +#ifndef NOTREACHED +# define NOTREACHED() ({ UNREACHABLE(); }) +#endif + +#endif // ANGLEBASE_LOGGING_H_ diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/macros.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/macros.h new file mode 100644 index 0000000000..06391784e4 --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/macros.h @@ -0,0 +1,17 @@ +// +// Copyright 2017 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// macros.h: Compatiblity hacks for importing Chromium's MRUCache. + +#ifndef ANGLEBASE_MACROS_H_ +#define ANGLEBASE_MACROS_H_ + +// A macro to disallow the copy constructor and operator= functions. +// This should be used in the private: declarations for a class. +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName &) = delete; \ + void operator=(const TypeName &) = delete + +#endif // ANGLEBASE_MACROS_H_ diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions.h new file mode 100644 index 0000000000..b37a36cd31 --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions.h @@ -0,0 +1,177 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef ANGLEBASE_NUMERICS_SAFE_CONVERSIONS_H_ +#define ANGLEBASE_NUMERICS_SAFE_CONVERSIONS_H_ + +#include <stddef.h> + +#include <limits> +#include <type_traits> + +#include "anglebase/logging.h" +#include "anglebase/numerics/safe_conversions_impl.h" + +namespace angle +{ + +namespace base +{ + +// Convenience function that returns true if the supplied value is in range +// for the destination type. +template <typename Dst, typename Src> +constexpr bool IsValueInRangeForNumericType(Src value) +{ + return internal::DstRangeRelationToSrcRange<Dst>(value) == internal::RANGE_VALID; +} + +// Convenience function for determining if a numeric value is negative without +// throwing compiler warnings on: unsigned(value) < 0. +template <typename T> +constexpr typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type IsValueNegative( + T value) +{ + static_assert(std::numeric_limits<T>::is_specialized, "Argument must be numeric."); + return value < 0; +} + +template <typename T> +constexpr typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type IsValueNegative(T) +{ + static_assert(std::numeric_limits<T>::is_specialized, "Argument must be numeric."); + return false; +} + +// checked_cast<> is analogous to static_cast<> for numeric types, +// except that it CHECKs that the specified numeric conversion will not +// overflow or underflow. NaN source will always trigger a CHECK. +template <typename Dst, typename Src> +inline Dst checked_cast(Src value) +{ + CHECK(IsValueInRangeForNumericType<Dst>(value)); + return static_cast<Dst>(value); +} + +// HandleNaN will cause this class to CHECK(false). 
+struct SaturatedCastNaNBehaviorCheck +{ + template <typename T> + static T HandleNaN() + { + CHECK(false); + return T(); + } +}; + +// HandleNaN will return 0 in this case. +struct SaturatedCastNaNBehaviorReturnZero +{ + template <typename T> + static constexpr T HandleNaN() + { + return T(); + } +}; + +namespace internal +{ +// This wrapper is used for C++11 constexpr support by avoiding the declaration +// of local variables in the saturated_cast template function. +template <typename Dst, class NaNHandler, typename Src> +constexpr Dst saturated_cast_impl(const Src value, const RangeConstraint constraint) +{ + return constraint == RANGE_VALID + ? static_cast<Dst>(value) + : (constraint == RANGE_UNDERFLOW + ? std::numeric_limits<Dst>::min() + : (constraint == RANGE_OVERFLOW + ? std::numeric_limits<Dst>::max() + : (constraint == RANGE_INVALID + ? NaNHandler::template HandleNaN<Dst>() + : (NOTREACHED(), static_cast<Dst>(value))))); +} +} // namespace internal + +// saturated_cast<> is analogous to static_cast<> for numeric types, except +// that the specified numeric conversion will saturate rather than overflow or +// underflow. NaN assignment to an integral will defer the behavior to a +// specified class. By default, it will return 0. +template <typename Dst, class NaNHandler = SaturatedCastNaNBehaviorReturnZero, typename Src> +constexpr Dst saturated_cast(Src value) +{ + return std::numeric_limits<Dst>::is_iec559 + ? static_cast<Dst>(value) // Floating point optimization. + : internal::saturated_cast_impl<Dst, NaNHandler>( + value, internal::DstRangeRelationToSrcRange<Dst>(value)); +} + +// strict_cast<> is analogous to static_cast<> for numeric types, except that +// it will cause a compile failure if the destination type is not large enough +// to contain any value in the source type. It performs no runtime checking. +template <typename Dst, typename Src> +constexpr Dst strict_cast(Src value) +{ + static_assert(std::numeric_limits<Src>::is_specialized, "Argument must be numeric."); + static_assert(std::numeric_limits<Dst>::is_specialized, "Result must be numeric."); + static_assert((internal::StaticDstRangeRelationToSrcRange<Dst, Src>::value == + internal::NUMERIC_RANGE_CONTAINED), + "The numeric conversion is out of range for this type. You " + "should probably use one of the following conversion " + "mechanisms on the value you want to pass:\n" + "- base::checked_cast\n" + "- base::saturated_cast\n" + "- base::CheckedNumeric"); + + return static_cast<Dst>(value); +} + +// StrictNumeric implements compile time range checking between numeric types by +// wrapping assignment operations in a strict_cast. This class is intended to be +// used for function arguments and return types, to ensure the destination type +// can always contain the source type. This is essentially the same as enforcing +// -Wconversion in gcc and C4302 warnings on MSVC, but it can be applied +// incrementally at API boundaries, making it easier to convert code so that it +// compiles cleanly with truncation warnings enabled. +// This template should introduce no runtime overhead, but it also provides no +// runtime checking of any of the associated mathematical operations. Use +// CheckedNumeric for runtime range checks of the actual value being assigned. +template <typename T> +class StrictNumeric +{ + public: + typedef T type; + + constexpr StrictNumeric() : value_(0) {} + + // Copy constructor. 
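To illustrate the conversion helpers defined above, a minimal sketch with hypothetical values: checked_cast CHECKs that the value fits, while saturated_cast clamps out-of-range input instead of asserting.

    #include <cstdint>
    #include "anglebase/numerics/safe_conversions.h"

    void SafeCastSketch()
    {
        // In range for int8_t, so checked_cast passes its CHECK.
        int8_t small = angle::base::checked_cast<int8_t>(100);

        // Out of range for uint8_t: saturated_cast clamps to the type's
        // bounds rather than asserting.
        uint8_t high = angle::base::saturated_cast<uint8_t>(1000);  // 255
        uint8_t low  = angle::base::saturated_cast<uint8_t>(-5);    // 0
        (void)small;
        (void)high;
        (void)low;
    }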
+ template <typename Src> + constexpr StrictNumeric(const StrictNumeric<Src> &rhs) : value_(strict_cast<T>(rhs.value_)) + {} + + // This is not an explicit constructor because we implicitly upgrade regular + // numerics to StrictNumerics to make them easier to use. + template <typename Src> + constexpr StrictNumeric(Src value) : value_(strict_cast<T>(value)) + {} + + // The numeric cast operator basically handles all the magic. + template <typename Dst> + constexpr operator Dst() const + { + return strict_cast<Dst>(value_); + } + + private: + const T value_; +}; + +// Explicitly make a shorter size_t typedef for convenience. +typedef StrictNumeric<size_t> SizeT; + +} // namespace base + +} // namespace angle + +#endif // ANGLEBASE_NUMERICS_SAFE_CONVERSIONS_H_ diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_impl.h new file mode 100644 index 0000000000..67cc4c9294 --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_conversions_impl.h @@ -0,0 +1,273 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef ANGLEBASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_ +#define ANGLEBASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_ + +#include <limits.h> +#include <stdint.h> + +#include <climits> +#include <limits> + +namespace angle +{ + +namespace base +{ +namespace internal +{ + +// The std library doesn't provide a binary max_exponent for integers, however +// we can compute one by adding one to the number of non-sign bits. This allows +// for accurate range comparisons between floating point and integer types. +template <typename NumericType> +struct MaxExponent +{ + static_assert(std::is_arithmetic<NumericType>::value, "Argument must be numeric."); + static const int value = + std::numeric_limits<NumericType>::is_iec559 + ? std::numeric_limits<NumericType>::max_exponent + : (sizeof(NumericType) * CHAR_BIT + 1 - std::numeric_limits<NumericType>::is_signed); +}; + +enum IntegerRepresentation +{ + INTEGER_REPRESENTATION_UNSIGNED, + INTEGER_REPRESENTATION_SIGNED +}; + +// A range for a given nunmeric Src type is contained for a given numeric Dst +// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and +// numeric_limits<Src>::min() >= numeric_limits<Dst>::min() are true. +// We implement this as template specializations rather than simple static +// comparisons to ensure type correctness in our comparisons. +enum NumericRangeRepresentation +{ + NUMERIC_RANGE_NOT_CONTAINED, + NUMERIC_RANGE_CONTAINED +}; + +// Helper templates to statically determine if our destination type can contain +// maximum and minimum values represented by the source type. + +template <typename Dst, + typename Src, + IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed + ? INTEGER_REPRESENTATION_SIGNED + : INTEGER_REPRESENTATION_UNSIGNED, + IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed + ? INTEGER_REPRESENTATION_SIGNED + : INTEGER_REPRESENTATION_UNSIGNED> +struct StaticDstRangeRelationToSrcRange; + +// Same sign: Dst is guaranteed to contain Src only if its range is equal or +// larger. 
+template <typename Dst, typename Src, IntegerRepresentation Sign>
+struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign>
+{
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value >= MaxExponent<Src>::value ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Unsigned to signed: Dst is guaranteed to contain source only if its range is
+// larger.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED>
+{
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value > MaxExponent<Src>::value ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Signed to unsigned: Dst cannot be statically determined to contain Src.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED>
+{
+ static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+enum RangeConstraint : unsigned char
+{
+ RANGE_VALID = 0x0, // Value can be represented by the destination type.
+ RANGE_UNDERFLOW = 0x1, // Value would underflow.
+ RANGE_OVERFLOW = 0x2, // Value would overflow.
+ RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW // Invalid (i.e. NaN).
+};
+
+// Helper function for coercing an int back to a RangeConstraint.
+constexpr RangeConstraint GetRangeConstraint(int integer_range_constraint)
+{
+ // TODO(jschuh): Once we get full C++14 support we want this
+ // assert(integer_range_constraint >= RANGE_VALID &&
+ // integer_range_constraint <= RANGE_INVALID)
+ return static_cast<RangeConstraint>(integer_range_constraint);
+}
+
+// This function creates a RangeConstraint from an upper and lower bound
+// check by taking advantage of the fact that only NaN can be out of range in
+// both directions at once.
+constexpr inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound, bool is_in_lower_bound)
+{
+ return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
+ (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
+}
+
+// The following helper template addresses a corner case in range checks for
+// conversion from a floating-point type to an integral type of smaller range
+// but larger precision (e.g. float -> unsigned). The problem is as follows:
+// 1. Integral maximum is always one less than a power of two, so it must be
+// truncated to fit the mantissa of the floating point. The direction of
+// rounding is implementation defined, but by default it's always IEEE
+// floats, which round to nearest and thus result in a value of larger
+// magnitude than the integral value.
+// Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
+// // is 4294967295u.
+// 2. If the floating point value is equal to the promoted integral maximum
+// value, a range check will erroneously pass.
+// Example: (4294967296f <= 4294967295u) // This is true due to a precision
+// // loss in rounding up to float.
+// 3. When the floating point value is then converted to an integral, the
+// resulting value is out of range for the target integral type and
+// thus is implementation defined.
+// Example: unsigned u = (float)UINT_MAX; // u will typically overflow to 0.
+// To fix this bug we manually truncate the maximum value when the destination
+// type is an integral of larger precision than the source floating-point type,
+// such that the resulting maximum is represented exactly as a floating point.
+template <typename Dst, typename Src> +struct NarrowingRange +{ + typedef typename std::numeric_limits<Src> SrcLimits; + typedef typename std::numeric_limits<Dst> DstLimits; + // The following logic avoids warnings where the max function is + // instantiated with invalid values for a bit shift (even though + // such a function can never be called). + static const int shift = + (MaxExponent<Src>::value > MaxExponent<Dst>::value && + SrcLimits::digits < DstLimits::digits && SrcLimits::is_iec559 && DstLimits::is_integer) + ? (DstLimits::digits - SrcLimits::digits) + : 0; + + static constexpr Dst max() + { + // We use UINTMAX_C below to avoid compiler warnings about shifting floating + // points. Since it's a compile time calculation, it shouldn't have any + // performance impact. + return DstLimits::max() - static_cast<Dst>((UINTMAX_C(1) << shift) - 1); + } + + static constexpr Dst min() + { + return std::numeric_limits<Dst>::is_iec559 ? -DstLimits::max() : DstLimits::min(); + } +}; + +template <typename Dst, + typename Src, + IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed + ? INTEGER_REPRESENTATION_SIGNED + : INTEGER_REPRESENTATION_UNSIGNED, + IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed + ? INTEGER_REPRESENTATION_SIGNED + : INTEGER_REPRESENTATION_UNSIGNED, + NumericRangeRepresentation DstRange = StaticDstRangeRelationToSrcRange<Dst, Src>::value> +struct DstRangeRelationToSrcRangeImpl; + +// The following templates are for ranges that must be verified at runtime. We +// split it into checks based on signedness to avoid confusing casts and +// compiler warnings on signed an unsigned comparisons. + +// Dst range is statically determined to contain Src: Nothing to check. +template <typename Dst, typename Src, IntegerRepresentation DstSign, IntegerRepresentation SrcSign> +struct DstRangeRelationToSrcRangeImpl<Dst, Src, DstSign, SrcSign, NUMERIC_RANGE_CONTAINED> +{ + static constexpr RangeConstraint Check(Src value) { return RANGE_VALID; } +}; + +// Signed to signed narrowing: Both the upper and lower boundaries may be +// exceeded. +template <typename Dst, typename Src> +struct DstRangeRelationToSrcRangeImpl<Dst, + Src, + INTEGER_REPRESENTATION_SIGNED, + INTEGER_REPRESENTATION_SIGNED, + NUMERIC_RANGE_NOT_CONTAINED> +{ + static constexpr RangeConstraint Check(Src value) + { + return GetRangeConstraint((value <= NarrowingRange<Dst, Src>::max()), + (value >= NarrowingRange<Dst, Src>::min())); + } +}; + +// Unsigned to unsigned narrowing: Only the upper boundary can be exceeded. +template <typename Dst, typename Src> +struct DstRangeRelationToSrcRangeImpl<Dst, + Src, + INTEGER_REPRESENTATION_UNSIGNED, + INTEGER_REPRESENTATION_UNSIGNED, + NUMERIC_RANGE_NOT_CONTAINED> +{ + static constexpr RangeConstraint Check(Src value) + { + return GetRangeConstraint(value <= NarrowingRange<Dst, Src>::max(), true); + } +}; + +// Unsigned to signed: The upper boundary may be exceeded. +template <typename Dst, typename Src> +struct DstRangeRelationToSrcRangeImpl<Dst, + Src, + INTEGER_REPRESENTATION_SIGNED, + INTEGER_REPRESENTATION_UNSIGNED, + NUMERIC_RANGE_NOT_CONTAINED> +{ + static constexpr RangeConstraint Check(Src value) + { + return sizeof(Dst) > sizeof(Src) + ? RANGE_VALID + : GetRangeConstraint(value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()), + true); + } +}; + +// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst, +// and any negative value exceeds the lower boundary. 
+template <typename Dst, typename Src> +struct DstRangeRelationToSrcRangeImpl<Dst, + Src, + INTEGER_REPRESENTATION_UNSIGNED, + INTEGER_REPRESENTATION_SIGNED, + NUMERIC_RANGE_NOT_CONTAINED> +{ + static constexpr RangeConstraint Check(Src value) + { + return (MaxExponent<Dst>::value >= MaxExponent<Src>::value) + ? GetRangeConstraint(true, value >= static_cast<Src>(0)) + : GetRangeConstraint(value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()), + value >= static_cast<Src>(0)); + } +}; + +template <typename Dst, typename Src> +constexpr RangeConstraint DstRangeRelationToSrcRange(Src value) +{ + static_assert(std::numeric_limits<Src>::is_specialized, "Argument must be numeric."); + static_assert(std::numeric_limits<Dst>::is_specialized, "Result must be numeric."); + return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value); +} + +} // namespace internal +} // namespace base + +} // namespace angle + +#endif // ANGLEBASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_ diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math.h new file mode 100644 index 0000000000..16d9b8549f --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math.h @@ -0,0 +1,334 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef ANGLEBASE_NUMERICS_SAFE_MATH_H_ +#define ANGLEBASE_NUMERICS_SAFE_MATH_H_ + +#include <stddef.h> + +#include <limits> +#include <type_traits> + +#include "anglebase/logging.h" +#include "anglebase/numerics/safe_math_impl.h" + +namespace angle +{ + +namespace base +{ + +namespace internal +{ + +// CheckedNumeric implements all the logic and operators for detecting integer +// boundary conditions such as overflow, underflow, and invalid conversions. +// The CheckedNumeric type implicitly converts from floating point and integer +// data types, and contains overloads for basic arithmetic operations (i.e.: +, +// -, *, /, %). +// +// The following methods convert from CheckedNumeric to standard numeric values: +// IsValid() - Returns true if the underlying numeric value is valid (i.e. has +// has not wrapped and is not the result of an invalid conversion). +// ValueOrDie() - Returns the underlying value. If the state is not valid this +// call will crash on a CHECK. +// ValueOrDefault() - Returns the current value, or the supplied default if the +// state is not valid. +// ValueFloating() - Returns the underlying floating point value (valid only +// only for floating point CheckedNumeric types). +// +// Bitwise operations are explicitly not supported, because correct +// handling of some cases (e.g. sign manipulation) is ambiguous. Comparison +// operations are explicitly not supported because they could result in a crash +// on a CHECK condition. You should use patterns like the following for these +// operations: +// Bitwise operation: +// CheckedNumeric<int> checked_int = untrusted_input_value; +// int x = checked_int.ValueOrDefault(0) | kFlagValues; +// Comparison: +// CheckedNumeric<size_t> checked_size = untrusted_input_value; +// checked_size += HEADER LENGTH; +// if (checked_size.IsValid() && checked_size.ValueOrDie() < buffer_size) +// Do stuff... 
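A minimal usage sketch of the checked-arithmetic pattern documented above; the function and variable names are illustrative and not part of the upstream header:

    #include "anglebase/numerics/safe_math.h"

    // Computes count * record_size + header_bytes, returning -1 if any step
    // overflowed or an input was otherwise invalid.
    int ComputeTotalBytes(int count, int record_size, int header_bytes)
    {
        angle::base::CheckedNumeric<int> total = count;  // implicit conversion from int
        total *= record_size;                            // overflow is recorded, not undefined
        total += header_bytes;
        return total.ValueOrDefault(-1);                 // -1 stands in for the invalid state
    }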
+template <typename T> +class CheckedNumeric +{ + static_assert(std::is_arithmetic<T>::value, "CheckedNumeric<T>: T must be a numeric type."); + + public: + typedef T type; + + CheckedNumeric() {} + + // Copy constructor. + template <typename Src> + CheckedNumeric(const CheckedNumeric<Src> &rhs) : state_(rhs.ValueUnsafe(), rhs.validity()) + {} + + template <typename Src> + CheckedNumeric(Src value, RangeConstraint validity) : state_(value, validity) + {} + + // This is not an explicit constructor because we implicitly upgrade regular + // numerics to CheckedNumerics to make them easier to use. + template <typename Src> + CheckedNumeric(Src value) // NOLINT(runtime/explicit) + : state_(value) + { + static_assert(std::numeric_limits<Src>::is_specialized, "Argument must be numeric."); + } + + // This is not an explicit constructor because we want a seamless conversion + // from StrictNumeric types. + template <typename Src> + CheckedNumeric(StrictNumeric<Src> value) // NOLINT(runtime/explicit) + : state_(static_cast<Src>(value)) + {} + + // IsValid() is the public API to test if a CheckedNumeric is currently valid. + bool IsValid() const { return validity() == RANGE_VALID; } + + // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid and is within the + // range supported by the destination type. Returns true if successful and false otherwise. + template <typename Dst> + constexpr bool AssignIfValid(Dst *result) const + { + return IsValid() ? ((*result = static_cast<Dst>(state_.value())), true) : false; + } + + // ValueOrDie() The primary accessor for the underlying value. If the current + // state is not valid it will CHECK and crash. + T ValueOrDie() const + { + CHECK(IsValid()); + return state_.value(); + } + + // ValueOrDefault(T default_value) A convenience method that returns the + // current value if the state is valid, and the supplied default_value for + // any other state. + T ValueOrDefault(T default_value) const { return IsValid() ? state_.value() : default_value; } + + // ValueFloating() - Since floating point values include their validity state, + // we provide an easy method for extracting them directly, without a risk of + // crashing on a CHECK. + T ValueFloating() const + { + static_assert(std::numeric_limits<T>::is_iec559, "Argument must be float."); + return CheckedNumeric<T>::cast(*this).ValueUnsafe(); + } + + // validity() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now for + // tests and to avoid a big matrix of friend operator overloads. But the + // values it returns are likely to change in the future. + // Returns: current validity state (i.e. valid, overflow, underflow, nan). + // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for + // saturation/wrapping so we can expose this state consistently and implement + // saturated arithmetic. + RangeConstraint validity() const { return state_.validity(); } + + // ValueUnsafe() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now + // for tests and to avoid a big matrix of friend operator overloads. But the + // values it returns are likely to change in the future. + // Returns: the raw numeric value, regardless of the current state. + // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for + // saturation/wrapping so we can expose this state consistently and implement + // saturated arithmetic. + T ValueUnsafe() const { return state_.value(); } + + // Prototypes for the supported arithmetic operator overloads. 
+ template <typename Src> + CheckedNumeric &operator+=(Src rhs); + template <typename Src> + CheckedNumeric &operator-=(Src rhs); + template <typename Src> + CheckedNumeric &operator*=(Src rhs); + template <typename Src> + CheckedNumeric &operator/=(Src rhs); + template <typename Src> + CheckedNumeric &operator%=(Src rhs); + + CheckedNumeric operator-() const + { + RangeConstraint validity; + T value = CheckedNeg(state_.value(), &validity); + // Negation is always valid for floating point. + if (std::numeric_limits<T>::is_iec559) + return CheckedNumeric<T>(value); + + validity = GetRangeConstraint(state_.validity() | validity); + return CheckedNumeric<T>(value, validity); + } + + CheckedNumeric Abs() const + { + RangeConstraint validity; + T value = CheckedAbs(state_.value(), &validity); + // Absolute value is always valid for floating point. + if (std::numeric_limits<T>::is_iec559) + return CheckedNumeric<T>(value); + + validity = GetRangeConstraint(state_.validity() | validity); + return CheckedNumeric<T>(value, validity); + } + + // This function is available only for integral types. It returns an unsigned + // integer of the same width as the source type, containing the absolute value + // of the source, and properly handling signed min. + CheckedNumeric<typename UnsignedOrFloatForSize<T>::type> UnsignedAbs() const + { + return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>( + CheckedUnsignedAbs(state_.value()), state_.validity()); + } + + CheckedNumeric &operator++() + { + *this += 1; + return *this; + } + + CheckedNumeric operator++(int) + { + CheckedNumeric value = *this; + *this += 1; + return value; + } + + CheckedNumeric &operator--() + { + *this -= 1; + return *this; + } + + CheckedNumeric operator--(int) + { + CheckedNumeric value = *this; + *this -= 1; + return value; + } + + // These static methods behave like a convenience cast operator targeting + // the desired CheckedNumeric type. As an optimization, a reference is + // returned when Src is the same type as T. + template <typename Src> + static CheckedNumeric<T> cast( + Src u, + typename std::enable_if<std::numeric_limits<Src>::is_specialized, int>::type = 0) + { + return u; + } + + template <typename Src> + static CheckedNumeric<T> cast( + const CheckedNumeric<Src> &u, + typename std::enable_if<!std::is_same<Src, T>::value, int>::type = 0) + { + return u; + } + + static const CheckedNumeric<T> &cast(const CheckedNumeric<T> &u) { return u; } + + private: + template <typename NumericType> + struct UnderlyingType + { + using type = NumericType; + }; + + template <typename NumericType> + struct UnderlyingType<CheckedNumeric<NumericType>> + { + using type = NumericType; + }; + + CheckedNumericState<T> state_; +}; + +// This is the boilerplate for the standard arithmetic operator overloads. A +// macro isn't the prettiest solution, but it beats rewriting these five times. +// Some details worth noting are: +// * We apply the standard arithmetic promotions. +// * We skip range checks for floating points. +// * We skip range checks for destination integers with sufficient range. +// TODO(jschuh): extract these out into templates. +#define ANGLEBASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP) \ + /* Binary arithmetic operator for CheckedNumerics of the same type. 
*/ \ + template <typename T> \ + CheckedNumeric<typename ArithmeticPromotion<T>::type> operator OP( \ + const CheckedNumeric<T> &lhs, const CheckedNumeric<T> &rhs) \ + { \ + typedef typename ArithmeticPromotion<T>::type Promotion; \ + /* Floating point always takes the fast path */ \ + if (std::numeric_limits<T>::is_iec559) \ + return CheckedNumeric<T>(lhs.ValueUnsafe() OP rhs.ValueUnsafe()); \ + if (IsIntegerArithmeticSafe<Promotion, T, T>::value) \ + return CheckedNumeric<Promotion>(lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \ + GetRangeConstraint(rhs.validity() | lhs.validity())); \ + RangeConstraint validity = RANGE_VALID; \ + T result = \ + static_cast<T>(Checked##NAME(static_cast<Promotion>(lhs.ValueUnsafe()), \ + static_cast<Promotion>(rhs.ValueUnsafe()), &validity)); \ + return CheckedNumeric<Promotion>( \ + result, GetRangeConstraint(validity | lhs.validity() | rhs.validity())); \ + } \ + /* Assignment arithmetic operator implementation from CheckedNumeric. */ \ + template <typename T> \ + template <typename Src> \ + CheckedNumeric<T> &CheckedNumeric<T>::operator COMPOUND_OP(Src rhs) \ + { \ + *this = CheckedNumeric<T>::cast(*this) \ + OP CheckedNumeric<typename UnderlyingType<Src>::type>::cast(rhs); \ + return *this; \ + } \ + /* Binary arithmetic operator for CheckedNumeric of different type. */ \ + template <typename T, typename Src> \ + CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \ + const CheckedNumeric<Src> &lhs, const CheckedNumeric<T> &rhs) \ + { \ + typedef typename ArithmeticPromotion<T, Src>::type Promotion; \ + if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \ + return CheckedNumeric<Promotion>(lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \ + GetRangeConstraint(rhs.validity() | lhs.validity())); \ + return CheckedNumeric<Promotion>::cast(lhs) OP CheckedNumeric<Promotion>::cast(rhs); \ + } \ + /* Binary arithmetic operator for left CheckedNumeric and right numeric. */ \ + template <typename T, typename Src, \ + typename std::enable_if<std::is_arithmetic<Src>::value>::type * = nullptr> \ + CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \ + const CheckedNumeric<T> &lhs, Src rhs) \ + { \ + typedef typename ArithmeticPromotion<T, Src>::type Promotion; \ + if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \ + return CheckedNumeric<Promotion>(lhs.ValueUnsafe() OP rhs, lhs.validity()); \ + return CheckedNumeric<Promotion>::cast(lhs) OP CheckedNumeric<Promotion>::cast(rhs); \ + } \ + /* Binary arithmetic operator for left numeric and right CheckedNumeric. 
*/ \ + template <typename T, typename Src, \ + typename std::enable_if<std::is_arithmetic<Src>::value>::type * = nullptr> \ + CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \ + Src lhs, const CheckedNumeric<T> &rhs) \ + { \ + typedef typename ArithmeticPromotion<T, Src>::type Promotion; \ + if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \ + return CheckedNumeric<Promotion>(lhs OP rhs.ValueUnsafe(), rhs.validity()); \ + return CheckedNumeric<Promotion>::cast(lhs) OP CheckedNumeric<Promotion>::cast(rhs); \ + } + +ANGLEBASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, +=) +ANGLEBASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -=) +ANGLEBASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *=) +ANGLEBASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /=) +ANGLEBASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %=) + +#undef ANGLEBASE_NUMERIC_ARITHMETIC_OPERATORS + +} // namespace internal + +using internal::CheckedNumeric; + +} // namespace base + +} // namespace angle + +#endif // ANGLEBASE_NUMERICS_SAFE_MATH_H_ diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_impl.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_impl.h new file mode 100644 index 0000000000..778fb8daaa --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/numerics/safe_math_impl.h @@ -0,0 +1,570 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef ANGLEBASE_NUMERICS_SAFE_MATH_IMPL_H_ +#define ANGLEBASE_NUMERICS_SAFE_MATH_IMPL_H_ + +#include <stddef.h> +#include <stdint.h> + +#include <climits> +#include <cmath> +#include <cstdlib> +#include <limits> +#include <type_traits> + +#include "anglebase/numerics/safe_conversions.h" + +namespace angle +{ + +namespace base +{ +namespace internal +{ + +// Everything from here up to the floating point operations is portable C++, +// but it may not be fast. This code could be split based on +// platform/architecture and replaced with potentially faster implementations. + +// Integer promotion templates used by the portable checked integer arithmetic. +template <size_t Size, bool IsSigned> +struct IntegerForSizeAndSign; +template <> +struct IntegerForSizeAndSign<1, true> +{ + typedef int8_t type; +}; +template <> +struct IntegerForSizeAndSign<1, false> +{ + typedef uint8_t type; +}; +template <> +struct IntegerForSizeAndSign<2, true> +{ + typedef int16_t type; +}; +template <> +struct IntegerForSizeAndSign<2, false> +{ + typedef uint16_t type; +}; +template <> +struct IntegerForSizeAndSign<4, true> +{ + typedef int32_t type; +}; +template <> +struct IntegerForSizeAndSign<4, false> +{ + typedef uint32_t type; +}; +template <> +struct IntegerForSizeAndSign<8, true> +{ + typedef int64_t type; +}; +template <> +struct IntegerForSizeAndSign<8, false> +{ + typedef uint64_t type; +}; + +// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to +// support 128-bit math, then the ArithmeticPromotion template below will need +// to be updated (or more likely replaced with a decltype expression). 
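For orientation, a few compile-time checks that the size/sign helper templates in this section are expected to satisfy. This is an illustrative sketch in a separate translation unit, assuming the include path used elsewhere in this commit:

    #include <cstdint>
    #include <type_traits>
    #include "anglebase/numerics/safe_math_impl.h"

    using angle::base::internal::IntegerForSizeAndSign;
    using angle::base::internal::TwiceWiderInteger;       // defined just below
    using angle::base::internal::UnsignedIntegerForSize;  // defined just below

    static_assert(std::is_same<IntegerForSizeAndSign<4, true>::type, int32_t>::value, "");
    static_assert(std::is_same<UnsignedIntegerForSize<int32_t>::type, uint32_t>::value, "");
    static_assert(std::is_same<TwiceWiderInteger<int16_t>::type, int32_t>::value, "");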
+ +template <typename Integer> +struct UnsignedIntegerForSize +{ + typedef + typename std::enable_if<std::numeric_limits<Integer>::is_integer, + typename IntegerForSizeAndSign<sizeof(Integer), false>::type>::type + type; +}; + +template <typename Integer> +struct SignedIntegerForSize +{ + typedef + typename std::enable_if<std::numeric_limits<Integer>::is_integer, + typename IntegerForSizeAndSign<sizeof(Integer), true>::type>::type + type; +}; + +template <typename Integer> +struct TwiceWiderInteger +{ + typedef typename std::enable_if< + std::numeric_limits<Integer>::is_integer, + typename IntegerForSizeAndSign<sizeof(Integer) * 2, + std::numeric_limits<Integer>::is_signed>::type>::type type; +}; + +template <typename Integer> +struct PositionOfSignBit +{ + static const typename std::enable_if<std::numeric_limits<Integer>::is_integer, size_t>::type + value = CHAR_BIT * sizeof(Integer) - 1; +}; + +// This is used for UnsignedAbs, where we need to support floating-point +// template instantiations even though we don't actually support the operations. +// However, there is no corresponding implementation of e.g. CheckedUnsignedAbs, +// so the float versions will not compile. +template <typename Numeric, + bool IsInteger = std::numeric_limits<Numeric>::is_integer, + bool IsFloat = std::numeric_limits<Numeric>::is_iec559> +struct UnsignedOrFloatForSize; + +template <typename Numeric> +struct UnsignedOrFloatForSize<Numeric, true, false> +{ + typedef typename UnsignedIntegerForSize<Numeric>::type type; +}; + +template <typename Numeric> +struct UnsignedOrFloatForSize<Numeric, false, true> +{ + typedef Numeric type; +}; + +// Helper templates for integer manipulations. + +template <typename T> +constexpr bool HasSignBit(T x) +{ + // Cast to unsigned since right shift on signed is undefined. + return !!(static_cast<typename UnsignedIntegerForSize<T>::type>(x) >> + PositionOfSignBit<T>::value); +} + +// This wrapper undoes the standard integer promotions. +template <typename T> +constexpr T BinaryComplement(T x) +{ + return static_cast<T>(~x); +} + +// Here are the actual portable checked integer math implementations. +// TODO(jschuh): Break this code out from the enable_if pattern and find a clean +// way to coalesce things into the CheckedNumericState specializations below. + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer, T>::type +CheckedAdd(T x, T y, RangeConstraint *validity) +{ + // Since the value of x+y is undefined if we have a signed type, we compute + // it using the unsigned type of the same size. + typedef typename UnsignedIntegerForSize<T>::type UnsignedDst; + UnsignedDst ux = static_cast<UnsignedDst>(x); + UnsignedDst uy = static_cast<UnsignedDst>(y); + UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy); + // Addition is valid if the sign of (x + y) is equal to either that of x or + // that of y. + if (std::numeric_limits<T>::is_signed) + { + if (HasSignBit(BinaryComplement(static_cast<UnsignedDst>((uresult ^ ux) & (uresult ^ uy))))) + { + *validity = RANGE_VALID; + } + else + { // Direction of wrap is inverse of result sign. + *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW; + } + } + else + { // Unsigned is either valid or overflow. + *validity = BinaryComplement(x) >= y ? 
RANGE_VALID : RANGE_OVERFLOW; + } + return static_cast<T>(uresult); +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer, T>::type +CheckedSub(T x, T y, RangeConstraint *validity) +{ + // Since the value of x+y is undefined if we have a signed type, we compute + // it using the unsigned type of the same size. + typedef typename UnsignedIntegerForSize<T>::type UnsignedDst; + UnsignedDst ux = static_cast<UnsignedDst>(x); + UnsignedDst uy = static_cast<UnsignedDst>(y); + UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy); + // Subtraction is valid if either x and y have same sign, or (x-y) and x have + // the same sign. + if (std::numeric_limits<T>::is_signed) + { + if (HasSignBit(BinaryComplement(static_cast<UnsignedDst>((uresult ^ ux) & (ux ^ uy))))) + { + *validity = RANGE_VALID; + } + else + { // Direction of wrap is inverse of result sign. + *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW; + } + } + else + { // Unsigned is either valid or underflow. + *validity = x >= y ? RANGE_VALID : RANGE_UNDERFLOW; + } + return static_cast<T>(uresult); +} + +// Integer multiplication is a bit complicated. In the fast case we just +// we just promote to a twice wider type, and range check the result. In the +// slow case we need to manually check that the result won't be truncated by +// checking with division against the appropriate bound. +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer && sizeof(T) * 2 <= sizeof(uintmax_t), + T>::type +CheckedMul(T x, T y, RangeConstraint *validity) +{ + typedef typename TwiceWiderInteger<T>::type IntermediateType; + IntermediateType tmp = static_cast<IntermediateType>(x) * static_cast<IntermediateType>(y); + *validity = DstRangeRelationToSrcRange<T>(tmp); + return static_cast<T>(tmp); +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed && + (sizeof(T) * 2 > sizeof(uintmax_t)), + T>::type +CheckedMul(T x, T y, RangeConstraint *validity) +{ + // If either side is zero then the result will be zero. + if (!x || !y) + { + *validity = RANGE_VALID; + return static_cast<T>(0); + } + else if (x > 0) + { + if (y > 0) + *validity = x <= std::numeric_limits<T>::max() / y ? RANGE_VALID : RANGE_OVERFLOW; + else + *validity = y >= std::numeric_limits<T>::min() / x ? RANGE_VALID : RANGE_UNDERFLOW; + } + else + { + if (y > 0) + *validity = x >= std::numeric_limits<T>::min() / y ? RANGE_VALID : RANGE_UNDERFLOW; + else + *validity = y >= std::numeric_limits<T>::max() / x ? RANGE_VALID : RANGE_OVERFLOW; + } + + return static_cast<T>(x * y); +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed && + (sizeof(T) * 2 > sizeof(uintmax_t)), + T>::type +CheckedMul(T x, T y, RangeConstraint *validity) +{ + *validity = (y == 0 || x <= std::numeric_limits<T>::max() / y) ? RANGE_VALID : RANGE_OVERFLOW; + return static_cast<T>(x * y); +} + +// Division just requires a check for an invalid negation on signed min/-1. 
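A quick illustration of that single overflowing case, written against the public CheckedNumeric wrapper (which routes through the CheckedDiv implementation below); the function name is illustrative:

    #include <limits>
    #include "anglebase/numerics/safe_math.h"

    // std::numeric_limits<int>::min() / -1 would be INT_MAX + 1, which does not
    // fit in int, so the checked division records RANGE_OVERFLOW instead of
    // performing an undefined operation.
    bool OverflowIsDetected()
    {
        angle::base::CheckedNumeric<int> q = std::numeric_limits<int>::min();
        q /= -1;
        return !q.IsValid();  // true: the result was flagged as out of range
    }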
+template <typename T> +T CheckedDiv(T x, + T y, + RangeConstraint *validity, + typename std::enable_if<std::numeric_limits<T>::is_integer, int>::type = 0) +{ + if (std::numeric_limits<T>::is_signed && x == std::numeric_limits<T>::min() && + y == static_cast<T>(-1)) + { + *validity = RANGE_OVERFLOW; + return std::numeric_limits<T>::min(); + } + + *validity = RANGE_VALID; + return static_cast<T>(x / y); +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed, + T>::type +CheckedMod(T x, T y, RangeConstraint *validity) +{ + *validity = y > 0 ? RANGE_VALID : RANGE_INVALID; + return static_cast<T>(x % y); +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed, + T>::type +CheckedMod(T x, T y, RangeConstraint *validity) +{ + *validity = RANGE_VALID; + return static_cast<T>(x % y); +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed, + T>::type +CheckedNeg(T value, RangeConstraint *validity) +{ + *validity = value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW; + // The negation of signed min is min, so catch that one. + return static_cast<T>(-value); +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed, + T>::type +CheckedNeg(T value, RangeConstraint *validity) +{ + // The only legal unsigned negation is zero. + *validity = value ? RANGE_UNDERFLOW : RANGE_VALID; + return static_cast<T>(-static_cast<typename SignedIntegerForSize<T>::type>(value)); +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed, + T>::type +CheckedAbs(T value, RangeConstraint *validity) +{ + *validity = value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW; + return static_cast<T>(std::abs(value)); +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed, + T>::type +CheckedAbs(T value, RangeConstraint *validity) +{ + // T is unsigned, so |value| must already be positive. + *validity = RANGE_VALID; + return value; +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed, + typename UnsignedIntegerForSize<T>::type>::type +CheckedUnsignedAbs(T value) +{ + typedef typename UnsignedIntegerForSize<T>::type UnsignedT; + return value == std::numeric_limits<T>::min() + ? static_cast<UnsignedT>(std::numeric_limits<T>::max()) + 1 + : static_cast<UnsignedT>(std::abs(value)); +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed, + T>::type +CheckedUnsignedAbs(T value) +{ + // T is unsigned, so |value| must already be positive. + return static_cast<T>(value); +} + +// These are the floating point stubs that the compiler needs to see. Only the +// negation operation is ever called. 
+#define ANGLEBASE_FLOAT_ARITHMETIC_STUBS(NAME) \ + template <typename T> \ + typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type Checked##NAME( \ + T, T, RangeConstraint *) \ + { \ + NOTREACHED(); \ + return static_cast<T>(0); \ + } + +ANGLEBASE_FLOAT_ARITHMETIC_STUBS(Add) +ANGLEBASE_FLOAT_ARITHMETIC_STUBS(Sub) +ANGLEBASE_FLOAT_ARITHMETIC_STUBS(Mul) +ANGLEBASE_FLOAT_ARITHMETIC_STUBS(Div) +ANGLEBASE_FLOAT_ARITHMETIC_STUBS(Mod) + +#undef ANGLEBASE_FLOAT_ARITHMETIC_STUBS + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(T value, + RangeConstraint *) +{ + return static_cast<T>(-value); +} + +template <typename T> +typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(T value, + RangeConstraint *) +{ + return static_cast<T>(std::abs(value)); +} + +// Floats carry around their validity state with them, but integers do not. So, +// we wrap the underlying value in a specialization in order to hide that detail +// and expose an interface via accessors. +enum NumericRepresentation +{ + NUMERIC_INTEGER, + NUMERIC_FLOATING, + NUMERIC_UNKNOWN +}; + +template <typename NumericType> +struct GetNumericRepresentation +{ + static const NumericRepresentation value = + std::numeric_limits<NumericType>::is_integer + ? NUMERIC_INTEGER + : (std::numeric_limits<NumericType>::is_iec559 ? NUMERIC_FLOATING : NUMERIC_UNKNOWN); +}; + +template <typename T, NumericRepresentation type = GetNumericRepresentation<T>::value> +class CheckedNumericState +{}; + +// Integrals require quite a bit of additional housekeeping to manage state. +template <typename T> +class CheckedNumericState<T, NUMERIC_INTEGER> +{ + private: + T value_; + RangeConstraint validity_ : CHAR_BIT; // Actually requires only two bits. + + public: + template <typename Src, NumericRepresentation type> + friend class CheckedNumericState; + + CheckedNumericState() : value_(0), validity_(RANGE_VALID) {} + + template <typename Src> + CheckedNumericState(Src value, RangeConstraint validity) + : value_(static_cast<T>(value)), + validity_(GetRangeConstraint(validity | DstRangeRelationToSrcRange<T>(value))) + { + static_assert(std::numeric_limits<Src>::is_specialized, "Argument must be numeric."); + } + + // Copy constructor. + template <typename Src> + CheckedNumericState(const CheckedNumericState<Src> &rhs) + : value_(static_cast<T>(rhs.value())), + validity_(GetRangeConstraint(rhs.validity() | DstRangeRelationToSrcRange<T>(rhs.value()))) + {} + + template <typename Src> + explicit CheckedNumericState( + Src value, + typename std::enable_if<std::numeric_limits<Src>::is_specialized, int>::type = 0) + : value_(static_cast<T>(value)), validity_(DstRangeRelationToSrcRange<T>(value)) + {} + + RangeConstraint validity() const { return validity_; } + T value() const { return value_; } +}; + +// Floating points maintain their own validity, but need translation wrappers. 
+template <typename T> +class CheckedNumericState<T, NUMERIC_FLOATING> +{ + private: + T value_; + + public: + template <typename Src, NumericRepresentation type> + friend class CheckedNumericState; + + CheckedNumericState() : value_(0.0) {} + + template <typename Src> + CheckedNumericState( + Src value, + RangeConstraint validity, + typename std::enable_if<std::numeric_limits<Src>::is_integer, int>::type = 0) + { + switch (DstRangeRelationToSrcRange<T>(value)) + { + case RANGE_VALID: + value_ = static_cast<T>(value); + break; + + case RANGE_UNDERFLOW: + value_ = -std::numeric_limits<T>::infinity(); + break; + + case RANGE_OVERFLOW: + value_ = std::numeric_limits<T>::infinity(); + break; + + case RANGE_INVALID: + value_ = std::numeric_limits<T>::quiet_NaN(); + break; + + default: + NOTREACHED(); + } + } + + template <typename Src> + explicit CheckedNumericState( + Src value, + typename std::enable_if<std::numeric_limits<Src>::is_specialized, int>::type = 0) + : value_(static_cast<T>(value)) + {} + + // Copy constructor. + template <typename Src> + CheckedNumericState(const CheckedNumericState<Src> &rhs) : value_(static_cast<T>(rhs.value())) + {} + + RangeConstraint validity() const + { + return GetRangeConstraint(value_ <= std::numeric_limits<T>::max(), + value_ >= -std::numeric_limits<T>::max()); + } + T value() const { return value_; } +}; + +// For integers less than 128-bit and floats 32-bit or larger, we have the type +// with the larger maximum exponent take precedence. +enum ArithmeticPromotionCategory +{ + LEFT_PROMOTION, + RIGHT_PROMOTION +}; + +template <typename Lhs, + typename Rhs = Lhs, + ArithmeticPromotionCategory Promotion = + (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value) ? LEFT_PROMOTION + : RIGHT_PROMOTION> +struct ArithmeticPromotion; + +template <typename Lhs, typename Rhs> +struct ArithmeticPromotion<Lhs, Rhs, LEFT_PROMOTION> +{ + typedef Lhs type; +}; + +template <typename Lhs, typename Rhs> +struct ArithmeticPromotion<Lhs, Rhs, RIGHT_PROMOTION> +{ + typedef Rhs type; +}; + +// We can statically check if operations on the provided types can wrap, so we +// can skip the checked operations if they're not needed. So, for an integer we +// care if the destination type preserves the sign and is twice the width of +// the source. +template <typename T, typename Lhs, typename Rhs> +struct IsIntegerArithmeticSafe +{ + static const bool value = + !std::numeric_limits<T>::is_iec559 && + StaticDstRangeRelationToSrcRange<T, Lhs>::value == NUMERIC_RANGE_CONTAINED && + sizeof(T) >= (2 * sizeof(Lhs)) && + StaticDstRangeRelationToSrcRange<T, Rhs>::value != NUMERIC_RANGE_CONTAINED && + sizeof(T) >= (2 * sizeof(Rhs)); +}; + +} // namespace internal +} // namespace base + +} // namespace angle + +#endif // ANGLEBASE_NUMERICS_SAFE_MATH_IMPL_H_ diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.cc b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.cc new file mode 100644 index 0000000000..cb88ba06e1 --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.cc @@ -0,0 +1,245 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "anglebase/sha1.h" + +#include <stddef.h> +#include <stdint.h> +#include <string.h> + +#include "anglebase/sys_byteorder.h" + +namespace angle +{ + +namespace base +{ + +// Implementation of SHA-1. 
Only handles data in byte-sized blocks, +// which simplifies the code a fair bit. + +// Identifier names follow notation in FIPS PUB 180-3, where you'll +// also find a description of the algorithm: +// http://csrc.nist.gov/publications/fips/fips180-3/fips180-3_final.pdf + +// Usage example: +// +// SecureHashAlgorithm sha; +// while(there is data to hash) +// sha.Update(moredata, size of data); +// sha.Final(); +// memcpy(somewhere, sha.Digest(), 20); +// +// to reuse the instance of sha, call sha.Init(); + +// TODO(jhawkins): Replace this implementation with a per-platform +// implementation using each platform's crypto library. See +// http://crbug.com/47218 + +class SecureHashAlgorithm +{ + public: + SecureHashAlgorithm() { Init(); } + + static const int kDigestSizeBytes; + + void Init(); + void Update(const void *data, size_t nbytes); + void Final(); + + // 20 bytes of message digest. + const unsigned char *Digest() const { return reinterpret_cast<const unsigned char *>(H); } + + private: + void Pad(); + void Process(); + + uint32_t A, B, C, D, E; + + uint32_t H[5]; + + union { + uint32_t W[80]; + uint8_t M[64]; + }; + + uint32_t cursor; + uint64_t l; +}; + +static inline uint32_t f(uint32_t t, uint32_t B, uint32_t C, uint32_t D) +{ + if (t < 20) + { + return (B & C) | ((~B) & D); + } + else if (t < 40) + { + return B ^ C ^ D; + } + else if (t < 60) + { + return (B & C) | (B & D) | (C & D); + } + else + { + return B ^ C ^ D; + } +} + +static inline uint32_t S(uint32_t n, uint32_t X) +{ + return (X << n) | (X >> (32 - n)); +} + +static inline uint32_t K(uint32_t t) +{ + if (t < 20) + { + return 0x5a827999; + } + else if (t < 40) + { + return 0x6ed9eba1; + } + else if (t < 60) + { + return 0x8f1bbcdc; + } + else + { + return 0xca62c1d6; + } +} + +const int SecureHashAlgorithm::kDigestSizeBytes = 20; + +void SecureHashAlgorithm::Init() +{ + A = 0; + B = 0; + C = 0; + D = 0; + E = 0; + cursor = 0; + l = 0; + H[0] = 0x67452301; + H[1] = 0xefcdab89; + H[2] = 0x98badcfe; + H[3] = 0x10325476; + H[4] = 0xc3d2e1f0; +} + +void SecureHashAlgorithm::Final() +{ + Pad(); + Process(); + + for (int t = 0; t < 5; ++t) + H[t] = ByteSwap(H[t]); +} + +void SecureHashAlgorithm::Update(const void *data, size_t nbytes) +{ + const uint8_t *d = reinterpret_cast<const uint8_t *>(data); + while (nbytes--) + { + M[cursor++] = *d++; + if (cursor >= 64) + Process(); + l += 8; + } +} + +void SecureHashAlgorithm::Pad() +{ + M[cursor++] = 0x80; + + if (cursor > 64 - 8) + { + // pad out to next block + while (cursor < 64) + M[cursor++] = 0; + + Process(); + } + + while (cursor < 64 - 8) + M[cursor++] = 0; + + M[cursor++] = (l >> 56) & 0xff; + M[cursor++] = (l >> 48) & 0xff; + M[cursor++] = (l >> 40) & 0xff; + M[cursor++] = (l >> 32) & 0xff; + M[cursor++] = (l >> 24) & 0xff; + M[cursor++] = (l >> 16) & 0xff; + M[cursor++] = (l >> 8) & 0xff; + M[cursor++] = l & 0xff; +} + +void SecureHashAlgorithm::Process() +{ + uint32_t t; + + // Each a...e corresponds to a section in the FIPS 180-3 algorithm. + + // a. + // + // W and M are in a union, so no need to memcpy. + // memcpy(W, M, sizeof(M)); + for (t = 0; t < 16; ++t) + W[t] = ByteSwap(W[t]); + + // b. + for (t = 16; t < 80; ++t) + W[t] = S(1, W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16]); + + // c. + A = H[0]; + B = H[1]; + C = H[2]; + D = H[3]; + E = H[4]; + + // d. + for (t = 0; t < 80; ++t) + { + uint32_t TEMP = S(5, A) + f(t, B, C, D) + E + W[t] + K(t); + E = D; + D = C; + C = S(30, B); + B = A; + A = TEMP; + } + + // e. 
+ H[0] += A; + H[1] += B; + H[2] += C; + H[3] += D; + H[4] += E; + + cursor = 0; +} + +std::string SHA1HashString(const std::string &str) +{ + char hash[SecureHashAlgorithm::kDigestSizeBytes]; + SHA1HashBytes(reinterpret_cast<const unsigned char *>(str.c_str()), str.length(), + reinterpret_cast<unsigned char *>(hash)); + return std::string(hash, SecureHashAlgorithm::kDigestSizeBytes); +} + +void SHA1HashBytes(const unsigned char *data, size_t len, unsigned char *hash) +{ + SecureHashAlgorithm sha; + sha.Update(data, len); + sha.Final(); + + memcpy(hash, sha.Digest(), SecureHashAlgorithm::kDigestSizeBytes); +} + +} // namespace base + +} // namespace angle diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.h new file mode 100644 index 0000000000..a60908814f --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/sha1.h @@ -0,0 +1,36 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef ANGLEBASE_SHA1_H_ +#define ANGLEBASE_SHA1_H_ + +#include <stddef.h> + +#include <string> + +#include "anglebase/base_export.h" + +namespace angle +{ + +namespace base +{ + +// These functions perform SHA-1 operations. + +static const size_t kSHA1Length = 20; // Length in bytes of a SHA-1 hash. + +// Computes the SHA-1 hash of the input string |str| and returns the full +// hash. +ANGLEBASE_EXPORT std::string SHA1HashString(const std::string &str); + +// Computes the SHA-1 hash of the |len| bytes in |data| and puts the hash +// in |hash|. |hash| must be kSHA1Length bytes long. +ANGLEBASE_EXPORT void SHA1HashBytes(const unsigned char *data, size_t len, unsigned char *hash); + +} // namespace base + +} // namespace angle + +#endif // ANGLEBASE_SHA1_H_ diff --git a/gfx/angle/checkout/src/common/third_party/base/anglebase/sys_byteorder.h b/gfx/angle/checkout/src/common/third_party/base/anglebase/sys_byteorder.h new file mode 100644 index 0000000000..43d1777f26 --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/base/anglebase/sys_byteorder.h @@ -0,0 +1,49 @@ +// +// Copyright 2017 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// sys_byteorder.h: Compatiblity hacks for importing Chromium's base/SHA1. + +#ifndef ANGLEBASE_SYS_BYTEORDER_H_ +#define ANGLEBASE_SYS_BYTEORDER_H_ + +namespace angle +{ + +namespace base +{ + +// Returns a value with all bytes in |x| swapped, i.e. reverses the endianness. +inline uint16_t ByteSwap(uint16_t x) +{ +#if defined(_MSC_VER) + return _byteswap_ushort(x); +#else + return __builtin_bswap16(x); +#endif +} + +inline uint32_t ByteSwap(uint32_t x) +{ +#if defined(_MSC_VER) + return _byteswap_ulong(x); +#else + return __builtin_bswap32(x); +#endif +} + +inline uint64_t ByteSwap(uint64_t x) +{ +#if defined(_MSC_VER) + return _byteswap_uint64(x); +#else + return __builtin_bswap64(x); +#endif +} + +} // namespace base + +} // namespace angle + +#endif // ANGLEBASE_SYS_BYTEORDER_H_
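A short usage sketch of the SHA-1 helpers declared in sha1.h above; the input string is illustrative:

    #include <string>
    #include "anglebase/sha1.h"

    // Returns the 20-byte (kSHA1Length) binary digest of an example string.
    std::string DigestExample()
    {
        const std::string input = "example payload";
        return angle::base::SHA1HashString(input);  // raw digest bytes, not hex
    }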
\ No newline at end of file diff --git a/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.cpp b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.cpp new file mode 100644 index 0000000000..379e5ce3d5 --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.cpp @@ -0,0 +1,339 @@ +/*----------------------------------------------------------------------------- + * MurmurHash3 was written by Austin Appleby, and is placed in the public + * domain. + * + * This implementation was written by Shane Day, and is also public domain. + * + * This is a portable ANSI C implementation of MurmurHash3_x86_32 (Murmur3A) + * with support for progressive processing. + */ + +/*----------------------------------------------------------------------------- + +If you want to understand the MurmurHash algorithm you would be much better +off reading the original source. Just point your browser at: +http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp + + +What this version provides? + +1. Progressive data feeding. Useful when the entire payload to be hashed +does not fit in memory or when the data is streamed through the application. +Also useful when hashing a number of strings with a common prefix. A partial +hash of a prefix string can be generated and reused for each suffix string. + +2. Portability. Plain old C so that it should compile on any old compiler. +Both CPU endian and access-alignment neutral, but avoiding inefficient code +when possible depending on CPU capabilities. + +3. Drop in. I personally like nice self contained public domain code, making it +easy to pilfer without loads of refactoring to work properly in the existing +application code & makefile structure and mucking around with licence files. +Just copy PMurHash.h and PMurHash.c and you're ready to go. + + +How does it work? + +We can only process entire 32 bit chunks of input, except for the very end +that may be shorter. So along with the partial hash we need to give back to +the caller a carry containing up to 3 bytes that we were unable to process. +This carry also needs to record the number of bytes the carry holds. I use +the low 2 bits as a count (0..3) and the carry bytes are shifted into the +high byte in stream order. + +To handle endianess I simply use a macro that reads a uint32_t and define +that macro to be a direct read on little endian machines, a read and swap +on big endian machines, or a byte-by-byte read if the endianess is unknown. + +-----------------------------------------------------------------------------*/ + +#include "PMurHash.h" +#include <stdint.h> + +/* I used ugly type names in the header to avoid potential conflicts with + * application or system typedefs & defines. Since I'm not including any more + * headers below here I can rename these so that the code reads like C99 */ +#undef uint32_t +#define uint32_t MH_UINT32 +#undef uint8_t +#define uint8_t MH_UINT8 + +/* MSVC warnings we choose to ignore */ +#if defined(_MSC_VER) +# pragma warning(disable : 4127) /* conditional expression is constant */ +#endif + +/*----------------------------------------------------------------------------- + * Endianess, misalignment capabilities and util macros + * + * The following 3 macros are defined in this section. The other macros defined + * are only needed to help derive these 3. 
+ * + * READ_UINT32(x) Read a little endian unsigned 32-bit int + * UNALIGNED_SAFE Defined if READ_UINT32 works on non-word boundaries + * ROTL32(x,r) Rotate x left by r bits + */ + +/* Convention is to define __BYTE_ORDER == to one of these values */ +#if !defined(__BIG_ENDIAN) +# define __BIG_ENDIAN 4321 +#endif +#if !defined(__LITTLE_ENDIAN) +# define __LITTLE_ENDIAN 1234 +#endif + +/* I386 */ +#if defined(_M_IX86) || defined(__i386__) || defined(__i386) || defined(i386) +# define __BYTE_ORDER __LITTLE_ENDIAN +# define UNALIGNED_SAFE +#endif + +/* gcc 'may' define __LITTLE_ENDIAN__ or __BIG_ENDIAN__ to 1 (Note the trailing __), + * or even _LITTLE_ENDIAN or _BIG_ENDIAN (Note the single _ prefix) */ +#if !defined(__BYTE_ORDER) +# if defined(__LITTLE_ENDIAN__) && __LITTLE_ENDIAN__ == 1 || \ + defined(_LITTLE_ENDIAN) && _LITTLE_ENDIAN == 1 +# define __BYTE_ORDER __LITTLE_ENDIAN +# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__ == 1 || defined(_BIG_ENDIAN) && _BIG_ENDIAN == 1 +# define __BYTE_ORDER __BIG_ENDIAN +# endif +#endif + +/* gcc (usually) defines xEL/EB macros for ARM and MIPS endianess */ +#if !defined(__BYTE_ORDER) +# if defined(__ARMEL__) || defined(__MIPSEL__) +# define __BYTE_ORDER __LITTLE_ENDIAN +# endif +# if defined(__ARMEB__) || defined(__MIPSEB__) +# define __BYTE_ORDER __BIG_ENDIAN +# endif +#endif + +/* Now find best way we can to READ_UINT32 */ +#if __BYTE_ORDER == __LITTLE_ENDIAN +/* CPU endian matches murmurhash algorithm, so read 32-bit word directly */ +# define READ_UINT32(ptr) (*((uint32_t *)(ptr))) +#elif __BYTE_ORDER == __BIG_ENDIAN +/* TODO: Add additional cases below where a compiler provided bswap32 is available */ +# if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) +# define READ_UINT32(ptr) (__builtin_bswap32(*((uint32_t *)(ptr)))) +# else +/* Without a known fast bswap32 we're just as well off doing this */ +# define READ_UINT32(ptr) (ptr[0] | ptr[1] << 8 | ptr[2] << 16 | ptr[3] << 24) +# define UNALIGNED_SAFE +# endif +#else +/* Unknown endianess so last resort is to read individual bytes */ +# define READ_UINT32(ptr) (ptr[0] | ptr[1] << 8 | ptr[2] << 16 | ptr[3] << 24) + +/* Since we're not doing word-reads we can skip the messing about with realignment */ +# define UNALIGNED_SAFE +#endif + +/* Find best way to ROTL32 */ +#if defined(_MSC_VER) +# include <stdlib.h> /* Microsoft put _rotl declaration in here */ +# define ROTL32(x, r) _rotl(x, r) +#else +/* gcc recognises this code and generates a rotate instruction for CPUs with one */ +# define ROTL32(x, r) (((uint32_t)x << r) | ((uint32_t)x >> (32 - r))) +#endif + +/*----------------------------------------------------------------------------- + * Core murmurhash algorithm macros */ + +#define C1 (0xcc9e2d51) +#define C2 (0x1b873593) + +/* This is the main processing body of the algorithm. It operates + * on each full 32-bits of input. 
*/ +#define DOBLOCK(h1, k1) \ + do \ + { \ + k1 *= C1; \ + k1 = ROTL32(k1, 15); \ + k1 *= C2; \ + \ + h1 ^= k1; \ + h1 = ROTL32(h1, 13); \ + h1 = h1 * 5 + 0xe6546b64; \ + } while (0) + +/* Append unaligned bytes to carry, forcing hash churn if we have 4 bytes */ +/* cnt=bytes to process, h1=name of h1 var, c=carry, n=bytes in c, ptr/len=payload */ +#define DOBYTES(cnt, h1, c, n, ptr, len) \ + do \ + { \ + int _i = cnt; \ + while (_i--) \ + { \ + c = c >> 8 | *ptr++ << 24; \ + n++; \ + len--; \ + if (n == 4) \ + { \ + DOBLOCK(h1, c); \ + n = 0; \ + } \ + } \ + } while (0) + +/*---------------------------------------------------------------------------*/ + +namespace angle +{ +/* Main hashing function. Initialise carry to 0 and h1 to 0 or an initial seed + * if wanted. Both ph1 and pcarry are required arguments. */ +void PMurHash32_Process(uint32_t *ph1, uint32_t *pcarry, const void *key, int len) +{ + uint32_t h1 = *ph1; + uint32_t c = *pcarry; + + const uint8_t *ptr = (uint8_t *)key; + const uint8_t *end; + + /* Extract carry count from low 2 bits of c value */ + int n = c & 3; + +#if defined(UNALIGNED_SAFE) + /* This CPU handles unaligned word access */ + + /* Consume any carry bytes */ + int i = (4 - n) & 3; + if (i && i <= len) + { + DOBYTES(i, h1, c, n, ptr, len); + } + + /* Process 32-bit chunks */ + end = ptr + len / 4 * 4; + for (; ptr < end; ptr += 4) + { + uint32_t k1 = READ_UINT32(ptr); + DOBLOCK(h1, k1); + } + +#else /*UNALIGNED_SAFE*/ + /* This CPU does not handle unaligned word access */ + + /* Consume enough so that the next data byte is word aligned */ + int i = -(intptr_t)ptr & 3; + if (i && i <= len) + { + DOBYTES(i, h1, c, n, ptr, len); + } + + /* We're now aligned. Process in aligned blocks. Specialise for each possible carry count */ + end = ptr + len / 4 * 4; + switch (n) + { /* how many bytes in c */ + case 0: /* c=[----] w=[3210] b=[3210]=w c'=[----] */ + for (; ptr < end; ptr += 4) + { + uint32_t k1 = READ_UINT32(ptr); + DOBLOCK(h1, k1); + } + break; + case 1: /* c=[0---] w=[4321] b=[3210]=c>>24|w<<8 c'=[4---] */ + for (; ptr < end; ptr += 4) + { + uint32_t k1 = c >> 24; + c = READ_UINT32(ptr); + k1 |= c << 8; + DOBLOCK(h1, k1); + } + break; + case 2: /* c=[10--] w=[5432] b=[3210]=c>>16|w<<16 c'=[54--] */ + for (; ptr < end; ptr += 4) + { + uint32_t k1 = c >> 16; + c = READ_UINT32(ptr); + k1 |= c << 16; + DOBLOCK(h1, k1); + } + break; + case 3: /* c=[210-] w=[6543] b=[3210]=c>>8|w<<24 c'=[654-] */ + for (; ptr < end; ptr += 4) + { + uint32_t k1 = c >> 8; + c = READ_UINT32(ptr); + k1 |= c << 24; + DOBLOCK(h1, k1); + } + } +#endif /*UNALIGNED_SAFE*/ + + /* Advance over whole 32-bit chunks, possibly leaving 1..3 bytes */ + len -= len / 4 * 4; + + /* Append any remaining bytes into carry */ + DOBYTES(len, h1, c, n, ptr, len); + + /* Copy out new running hash and carry */ + *ph1 = h1; + *pcarry = (c & ~0xff) | n; +} + +/*---------------------------------------------------------------------------*/ + +/* Finalize a hash. 
To match the original Murmur3A the total_length must be provided */ +uint32_t PMurHash32_Result(uint32_t h, uint32_t carry, uint32_t total_length) +{ + uint32_t k1; + int n = carry & 3; + if (n) + { + k1 = carry >> (4 - n) * 8; + k1 *= C1; + k1 = ROTL32(k1, 15); + k1 *= C2; + h ^= k1; + } + h ^= total_length; + + /* fmix */ + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + + return h; +} + +/*---------------------------------------------------------------------------*/ + +/* Murmur3A compatable all-at-once */ +uint32_t PMurHash32(uint32_t seed, const void *key, int len) +{ + uint32_t h1 = seed, carry = 0; + PMurHash32_Process(&h1, &carry, key, len); + return PMurHash32_Result(h1, carry, len); +} + +/*---------------------------------------------------------------------------*/ + +/* Provide an API suitable for smhasher */ +void PMurHash32_test(const void *key, int len, uint32_t seed, void *out) +{ + uint32_t h1 = seed, carry = 0; + const uint8_t *ptr = (uint8_t *)key; + const uint8_t *end = ptr + len; + +#if 0 /* Exercise the progressive processing */ + while(ptr < end) { + //const uint8_t *mid = ptr + rand()%(end-ptr)+1; + const uint8_t *mid = ptr + (rand()&0xF); + mid = mid<end?mid:end; + PMurHash32_Process(&h1, &carry, ptr, mid-ptr); + ptr = mid; + } +#else + PMurHash32_Process(&h1, &carry, ptr, (int)(end - ptr)); +#endif + h1 = PMurHash32_Result(h1, carry, len); + *(uint32_t *)out = h1; +} +} // namespace angle + +/*---------------------------------------------------------------------------*/ diff --git a/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.h b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.h new file mode 100644 index 0000000000..0a3c96fa14 --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/smhasher/src/PMurHash.h @@ -0,0 +1,57 @@ +/*----------------------------------------------------------------------------- + * MurmurHash3 was written by Austin Appleby, and is placed in the public + * domain. + * + * This implementation was written by Shane Day, and is also public domain. + * + * This is a portable ANSI C implementation of MurmurHash3_x86_32 (Murmur3A) + * with support for progressive processing. + */ + +/* ------------------------------------------------------------------------- */ +/* Determine what native type to use for uint32_t */ + +/* We can't use the name 'uint32_t' here because it will conflict with + * any version provided by the system headers or application. 
*/ + +/* First look for special cases */ +#if defined(_MSC_VER) +# define MH_UINT32 unsigned long +#endif + +/* If the compiler says it's C99 then take its word for it */ +#if !defined(MH_UINT32) && (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) +# include <stdint.h> +# define MH_UINT32 uint32_t +#endif + +/* Otherwise try testing against max value macros from limit.h */ +#if !defined(MH_UINT32) +# include <limits.h> +# if (USHRT_MAX == 0xffffffffUL) +# define MH_UINT32 unsigned short +# elif (UINT_MAX == 0xffffffffUL) +# define MH_UINT32 unsigned int +# elif (ULONG_MAX == 0xffffffffUL) +# define MH_UINT32 unsigned long +# endif +#endif + +#if !defined(MH_UINT32) +# error Unable to determine type name for unsigned 32-bit int +#endif + +/* I'm yet to work on a platform where 'unsigned char' is not 8 bits */ +#define MH_UINT8 unsigned char + +/* ------------------------------------------------------------------------- */ +/* Prototypes */ + +namespace angle +{ +void PMurHash32_Process(MH_UINT32 *ph1, MH_UINT32 *pcarry, const void *key, int len); +MH_UINT32 PMurHash32_Result(MH_UINT32 h1, MH_UINT32 carry, MH_UINT32 total_length); +MH_UINT32 PMurHash32(MH_UINT32 seed, const void *key, int len); + +void PMurHash32_test(const void *key, int len, MH_UINT32 seed, void *out); +} // namespace angle diff --git a/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.c b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.c new file mode 100644 index 0000000000..13669b2a4d --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.c @@ -0,0 +1,1030 @@ +/* +* xxHash - Fast Hash algorithm +* Copyright (C) 2012-2016, Yann Collet +* +* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following disclaimer +* in the documentation and/or other materials provided with the +* distribution. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +* You can contact the author at : +* - xxHash homepage: http://www.xxhash.com +* - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + + +/* ************************************* +* Tuning parameters +***************************************/ +/*!XXH_FORCE_MEMORY_ACCESS : + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. 
+ * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. + * It can generate buggy code on targets which do not support unaligned memory accesses. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://stackoverflow.com/a/32095106/646947 for details. + * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ + || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define XXH_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7S__) )) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/*!XXH_ACCEPT_NULL_INPUT_POINTER : + * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault. + * When this macro is enabled, xxHash actively checks input for null pointer. + * It it is, result for null input pointers is the same as a null-length input. + */ +#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ +# define XXH_ACCEPT_NULL_INPUT_POINTER 0 +#endif + +/*!XXH_FORCE_NATIVE_FORMAT : + * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. + * Results are therefore identical for little-endian and big-endian CPU. + * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. + * Should endian-independence be of no importance for your application, you may set the #define below to 1, + * to improve speed for Big-endian CPU. + * This option has no impact on Little_Endian CPU. + */ +#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ +# define XXH_FORCE_NATIVE_FORMAT 0 +#endif + +/*!XXH_FORCE_ALIGN_CHECK : + * This is a minor performance trick, only useful with lots of very small keys. + * It means : check for aligned/unaligned input. + * The check costs one initial branch per hash; + * set it to 0 when the input is guaranteed to be aligned, + * or when alignment doesn't matter for performance. + */ +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ +# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + + +/* ************************************* +* Includes & Memory related functions +***************************************/ +/*! Modify the local functions below should you wish to use some other memory routines +* for malloc(), free() */ +#include <stdlib.h> +static void* XXH_malloc(size_t s) { return malloc(s); } +static void XXH_free (void* p) { free(p); } +/*! 
and for memcpy() */ +#include <string.h> +static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } + +#include <assert.h> /* assert */ + +#define XXH_STATIC_LINKING_ONLY +#include "xxhash.h" + + +/* ************************************* +* Compiler Specific Options +***************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# define FORCE_INLINE static __forceinline +#else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/* ************************************* +* Basic Types +***************************************/ +#ifndef MEM_MODULE +# if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; +# else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; +# endif +#endif + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; } __attribute__((packed)) unalign; +static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } + +#else + +/* portable and safe solution. Generally efficient. 
+ * see : http://stackoverflow.com/a/32095106/646947 + */ +static U32 XXH_read32(const void* memPtr) +{ + U32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ +#if defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) +# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) +#endif + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +#else +static U32 XXH_swap32 (U32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +#endif + + +/* ************************************* +* Architecture Macros +***************************************/ +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; + +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +#ifndef XXH_CPU_LITTLE_ENDIAN +static int XXH_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +#endif + + +/* *************************** +* Memory reads +*****************************/ +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; + +FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); + else + return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); +} + +FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE32_align(ptr, endian, XXH_unaligned); +} + +static U32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} + + +/* ************************************* +* Macros +***************************************/ +#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ******************************************************************* +* 32-bit hash functions +*********************************************************************/ +static const U32 PRIME32_1 = 2654435761U; /* 0b10011110001101110111100110110001 */ +static const U32 PRIME32_2 = 2246822519U; /* 0b10000101111010111100101001110111 */ +static const U32 PRIME32_3 = 3266489917U; /* 0b11000010101100101010111000111101 */ +static const U32 PRIME32_4 = 668265263U; /* 0b00100111110101001110101100101111 */ +static const U32 PRIME32_5 = 374761393U; /* 0b00010110010101100110011110110001 */ + +static U32 XXH32_round(U32 seed, U32 input) +{ + seed += input * PRIME32_2; + seed = XXH_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; +} + +/* mix all bits */ +static U32 XXH32_avalanche(U32 h32) +{ + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + return(h32); +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) + +static U32 +XXH32_finalize(U32 h32, const void* ptr, size_t len, + XXH_endianess endian, XXH_alignment align) + +{ + const BYTE* p = (const BYTE*)ptr; + +#define PROCESS1 \ + h32 += (*p++) * PRIME32_5; \ + h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; + +#define PROCESS4 \ + h32 += XXH_get32bits(p) * PRIME32_3; \ + p+=4; \ + h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; + + switch(len&15) /* or switch(bEnd - p) */ + { + case 12: PROCESS4; + /* fallthrough */ + case 8: PROCESS4; + /* fallthrough */ + case 4: PROCESS4; + return XXH32_avalanche(h32); + + case 13: PROCESS4; + /* fallthrough */ + case 9: PROCESS4; + /* fallthrough */ + case 5: PROCESS4; + PROCESS1; + return XXH32_avalanche(h32); + + case 14: PROCESS4; + /* fallthrough */ + case 10: PROCESS4; + /* fallthrough */ + case 6: PROCESS4; + PROCESS1; + PROCESS1; + return XXH32_avalanche(h32); + + case 15: PROCESS4; + /* fallthrough */ + case 11: PROCESS4; + /* fallthrough */ + case 7: PROCESS4; + /* fallthrough */ + case 3: PROCESS1; + /* fallthrough */ + case 2: PROCESS1; + /* fallthrough */ + case 1: PROCESS1; + /* fallthrough */ + case 0: return XXH32_avalanche(h32); + } + assert(0); + return h32; /* reaching this point is deemed impossible */ +} + + +FORCE_INLINE U32 +XXH32_endian_align(const void* input, size_t len, U32 seed, + XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U32 h32; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)16; + } +#endif + + if (len>=16) { + const BYTE* const limit = bEnd - 15; + U32 v1 = seed + PRIME32_1 + PRIME32_2; + U32 v2 = seed + PRIME32_2; + U32 v3 = seed + 0; + U32 v4 = seed - PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; + v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; + v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; + v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; + } while (p < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (U32)len; + + return XXH32_finalize(h32, p, len&15, endian, align); +} + + +XXH_PUBLIC_API unsigned int 
XXH32 (const void* input, size_t len, unsigned int seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, seed); + XXH32_update(&state, input, len); + return XXH32_digest(&state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + + +/*====== Hash streaming ======*/ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) +{ + XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; +} + + +FORCE_INLINE XXH_errorcode +XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + + state->total_len_32 += (unsigned)len; + state->large_len |= (len>=16) | (state->total_len_32>=16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); + state->memsize += (unsigned)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const U32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = state->v1; + U32 v2 = state->v2; + U32 v3 = state->v3; + U32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; + v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + 
state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH32_update_endian(state_in, input, len, XXH_bigEndian); +} + + +FORCE_INLINE U32 +XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) +{ + U32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + + XXH_rotl32(state->v2, 7) + + XXH_rotl32(state->v3, 12) + + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } + + h32 += state->total_len_32; + + return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned); +} + + +XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_digest_endian(state_in, XXH_littleEndian); + else + return XXH32_digest_endian(state_in, XXH_bigEndian); +} + + +/*====== Canonical representation ======*/ + +/*! Default XXH result types are basic unsigned 32 and 64 bits. +* The canonical representation follows human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file or buffer, remaining comparable across different systems. +*/ + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + + +#ifndef XXH_NO_LONG_LONG + +/* ******************************************************************* +* 64-bit hash functions +*********************************************************************/ + +/*====== Memory access ======*/ + +#ifndef MEM_MODULE +# define MEM_MODULE +# if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + typedef uint64_t U64; +# else + /* if compiler doesn't support unsigned long long, replace by another 64-bit type */ + typedef unsigned long long U64; +# endif +#endif + + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; +static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } + +#else + +/* portable and safe solution. Generally efficient. 
+ * see : http://stackoverflow.com/a/32095106/646947 + */ + +static U64 XXH_read64(const void* memPtr) +{ + U64 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap64 __builtin_bswap64 +#else +static U64 XXH_swap64 (U64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + +FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); + else + return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); +} + +FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE64_align(ptr, endian, XXH_unaligned); +} + +static U64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} + + +/*====== xxh64 ======*/ + +static const U64 PRIME64_1 = 11400714785074694791ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */ +static const U64 PRIME64_2 = 14029467366897019727ULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */ +static const U64 PRIME64_3 = 1609587929392839161ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */ +static const U64 PRIME64_4 = 9650029242287828579ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */ +static const U64 PRIME64_5 = 2870177450012600261ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */ + +static U64 XXH64_round(U64 acc, U64 input) +{ + acc += input * PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; +} + +static U64 XXH64_mergeRound(U64 acc, U64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; +} + +static U64 XXH64_avalanche(U64 h64) +{ + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + return h64; +} + + +#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) + +static U64 +XXH64_finalize(U64 h64, const void* ptr, size_t len, + XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)ptr; + +#define PROCESS1_64 \ + h64 ^= (*p++) * PRIME64_5; \ + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + +#define PROCESS4_64 \ + h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \ + p+=4; \ + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + +#define PROCESS8_64 { \ + U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \ + p+=8; \ + h64 ^= k1; \ + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \ +} + + switch(len&31) { + case 24: PROCESS8_64; + /* fallthrough */ + case 16: PROCESS8_64; + /* fallthrough */ + case 8: PROCESS8_64; + return XXH64_avalanche(h64); + + case 28: PROCESS8_64; + /* fallthrough */ + case 20: PROCESS8_64; + /* fallthrough */ + case 12: PROCESS8_64; + /* fallthrough */ + case 4: PROCESS4_64; + return XXH64_avalanche(h64); + + case 25: PROCESS8_64; + /* fallthrough */ + case 17: PROCESS8_64; + /* fallthrough */ + case 9: PROCESS8_64; + PROCESS1_64; + return 
XXH64_avalanche(h64); + + case 29: PROCESS8_64; + /* fallthrough */ + case 21: PROCESS8_64; + /* fallthrough */ + case 13: PROCESS8_64; + /* fallthrough */ + case 5: PROCESS4_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 26: PROCESS8_64; + /* fallthrough */ + case 18: PROCESS8_64; + /* fallthrough */ + case 10: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 30: PROCESS8_64; + /* fallthrough */ + case 22: PROCESS8_64; + /* fallthrough */ + case 14: PROCESS8_64; + /* fallthrough */ + case 6: PROCESS4_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 27: PROCESS8_64; + /* fallthrough */ + case 19: PROCESS8_64; + /* fallthrough */ + case 11: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 31: PROCESS8_64; + /* fallthrough */ + case 23: PROCESS8_64; + /* fallthrough */ + case 15: PROCESS8_64; + /* fallthrough */ + case 7: PROCESS4_64; + /* fallthrough */ + case 3: PROCESS1_64; + /* fallthrough */ + case 2: PROCESS1_64; + /* fallthrough */ + case 1: PROCESS1_64; + /* fallthrough */ + case 0: return XXH64_avalanche(h64); + } + + /* impossible to reach */ + assert(0); + return 0; /* unreachable, but some compilers complain without it */ +} + +FORCE_INLINE U64 +XXH64_endian_align(const void* input, size_t len, U64 seed, + XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U64 h64; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)32; + } +#endif + + if (len>=32) { + const BYTE* const limit = bEnd - 32; + U64 v1 = seed + PRIME64_1 + PRIME64_2; + U64 v2 = seed + PRIME64_2; + U64 v3 = seed + 0; + U64 v4 = seed - PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; + v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; + v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; + v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; + } while (p<=limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (U64) len; + + return XXH64_finalize(h64, p, len, endian, align); +} + + +XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_state_t state; + XXH64_reset(&state, seed); + XXH64_update(&state, input, len); + return XXH64_digest(&state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + +/*====== Hash Streaming ======*/ + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return 
(XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) +{ + XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; +} + +FORCE_INLINE XXH_errorcode +XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); + state->memsize += (U32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); + p += 32-state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const BYTE* const limit = bEnd - 32; + U64 v1 = state->v1; + U64 v2 = state->v2; + U64 v3 = state->v3; + U64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH64_update_endian(state_in, input, len, XXH_bigEndian); +} + +FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) +{ + U64 h64; + + if (state->total_len >= 32) { + U64 const v1 = state->v1; + U64 const v2 = state->v2; + U64 const v3 = state->v3; + U64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 /*seed*/ + PRIME64_5; + } + + h64 += (U64) state->total_len; + + return XXH64_finalize(h64, state->mem64, 
(size_t)state->total_len, endian, XXH_aligned); +} + +XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_digest_endian(state_in, XXH_littleEndian); + else + return XXH64_digest_endian(state_in, XXH_bigEndian); +} + + +/*====== Canonical representation ======*/ + +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#endif /* XXH_NO_LONG_LONG */ diff --git a/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.h b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.h new file mode 100644 index 0000000000..0de203c947 --- /dev/null +++ b/gfx/angle/checkout/src/common/third_party/xxhash/xxhash.h @@ -0,0 +1,341 @@ +/* + xxHash - Extremely Fast Hash algorithm + Header File + Copyright (C) 2012-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + +/* Notice extracted from xxHash homepage : + +xxHash is an extremely fast Hash algorithm, running at RAM speed limits. +It also successfully passes all tests from the SMHasher suite. + +Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) + +Name Speed Q.Score Author +xxHash 5.4 GB/s 10 +CrapWow 3.2 GB/s 2 Andrew +MumurHash 3a 2.7 GB/s 10 Austin Appleby +SpookyHash 2.0 GB/s 10 Bob Jenkins +SBox 1.4 GB/s 9 Bret Mulvey +Lookup3 1.2 GB/s 9 Bob Jenkins +SuperFastHash 1.2 GB/s 1 Paul Hsieh +CityHash64 1.05 GB/s 10 Pike & Alakuijala +FNV 0.55 GB/s 5 Fowler, Noll, Vo +CRC32 0.43 GB/s 9 +MD5-32 0.33 GB/s 10 Ronald L. Rivest +SHA1-32 0.28 GB/s 10 + +Q.Score is a measure of quality of the hash function. +It depends on successfully passing SMHasher test set. +10 is a perfect score. 
+ +A 64-bit version, named XXH64, is available since r35. +It offers much better speed, but for 64-bit applications only. +Name Speed on 64 bits Speed on 32 bits +XXH64 13.8 GB/s 1.9 GB/s +XXH32 6.8 GB/s 6.0 GB/s +*/ + +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + +#if defined(__cplusplus) +extern "C" { +#endif + +/* **************************** + * Definitions + ******************************/ +#include <stddef.h> /* size_t */ +typedef enum +{ + XXH_OK = 0, + XXH_ERROR +} XXH_errorcode; + +/* **************************** + * API modifier + ******************************/ +/** XXH_INLINE_ALL (and XXH_PRIVATE_API) + * This is useful to include xxhash functions in `static` mode + * in order to inline them, and remove their symbol from the public list. + * Inlining can offer dramatic performance improvement on small keys. + * Methodology : + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * `xxhash.c` is automatically included. + * It's not useful to compile and link it as a separate module. + */ +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +# endif +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +#elif defined(__cplusplus) || (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 \ + */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else +/* this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static +# endif +#else +# define XXH_PUBLIC_API /* do nothing */ +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ + +/*! XXH_NAMESPACE, aka Namespace Emulation : + * + * If you want to include _and expose_ xxHash functions from within your own library, + * but also want to avoid symbol collisions with other libraries which may also include xxHash, + * + * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library + * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). + * + * Note that no change is required within the calling program as long as it includes `xxhash.h` : + * regular symbol name will be automatically translated by this header. 
+ */ +#ifdef XXH_NAMESPACE +# define XXH_CAT(A, B) A##B +# define XXH_NAME2(A, B) XXH_CAT(A, B) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +#endif + +/* ************************************* + * Version + ***************************************/ +#define XXH_VERSION_MAJOR 0 +#define XXH_VERSION_MINOR 6 +#define XXH_VERSION_RELEASE 5 +#define XXH_VERSION_NUMBER \ + (XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + XXH_VERSION_RELEASE) +XXH_PUBLIC_API unsigned XXH_versionNumber(void); + +/*-********************************************************************** + * 32-bit hash + ************************************************************************/ +typedef unsigned int XXH32_hash_t; + +/*! XXH32() : + Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". + The memory between input & input+length must be valid (allocated and read-accessible). + "seed" can be used to alter the result predictably. + Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ +XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input, size_t length, unsigned int seed); + +/*====== Streaming ======*/ +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr); +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dst_state, const XXH32_state_t *src_state); + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, unsigned int seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *statePtr, + const void *input, + size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *statePtr); + +/* + * Streaming functions generate the xxHash of an input provided in multiple segments. + * Note that, for small input, they are slower than single-call functions, due to state + * management. For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * XXH state must first be allocated, using XXH*_createState() . + * + * Start a new hash by initializing state with a seed, using XXH*_reset(). + * + * Then, feed the hash state by calling XXH*_update() as many times as necessary. 
+ * The function returns an error code, with 0 meaning OK, and any other value meaning there is + * an error. + * + * Finally, a hash value can be produced anytime, by using XXH*_digest(). + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a digest, + * and generate some new hashes later on, by calling again XXH*_digest(). + * + * When done, free XXH state space if it was allocated dynamically. + */ + +/*====== Canonical representation ======*/ + +typedef struct +{ + unsigned char digest[4]; +} XXH32_canonical_t; +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst, XXH32_hash_t hash); +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t *src); + +/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. + * The canonical representation uses human-readable write convention, aka big-endian (large + * digits first). These functions allow transformation of hash result into and from its + * canonical format. This way, hash values can be written into a file / memory, and remain + * comparable on different systems and programs. + */ + +#ifndef XXH_NO_LONG_LONG +/*-********************************************************************** + * 64-bit hash + ************************************************************************/ +typedef unsigned long long XXH64_hash_t; + +/*! XXH64() : + Calculate the 64-bit hash of sequence of length "len" stored at memory address "input". + "seed" can be used to alter the result predictably. + This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark). +*/ +XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input, size_t length, unsigned long long seed); + +/*====== Streaming ======*/ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dst_state, const XXH64_state_t *src_state); + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr, unsigned long long seed); +XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *statePtr, + const void *input, + size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *statePtr); + +/*====== Canonical representation ======*/ +typedef struct +{ + unsigned char digest[8]; +} XXH64_canonical_t; +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst, XXH64_hash_t hash); +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t *src); +#endif /* XXH_NO_LONG_LONG */ + +#ifdef XXH_STATIC_LINKING_ONLY + +/* ================================================================================================ + This section contains declarations which are not guaranteed to remain stable. + They may change in future versions, becoming incompatible with a different version of the +library. These declarations should only be used with static linking. Never use them in +association with dynamic linking ! +=================================================================================================== +*/ + +/* These definitions are only present to allow + * static allocation of XXH state, on stack or in a struct for example. + * Never **ever** use members directly. 
*/ + +# if !defined(__VMS) && (defined(__cplusplus) || (defined(__STDC_VERSION__) && \ + (__STDC_VERSION__ >= 199901L) /* C99 */)) +# include <stdint.h> + +struct XXH32_state_s +{ + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; + uint32_t reserved; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + +struct XXH64_state_s +{ + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; + uint32_t reserved[2]; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ + +# else + +struct XXH32_state_s +{ + unsigned total_len_32; + unsigned large_len; + unsigned v1; + unsigned v2; + unsigned v3; + unsigned v4; + unsigned mem32[4]; + unsigned memsize; + unsigned reserved; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + +# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */ +struct XXH64_state_s +{ + unsigned long long total_len; + unsigned long long v1; + unsigned long long v2; + unsigned long long v3; + unsigned long long v4; + unsigned long long mem64[4]; + unsigned memsize; + unsigned reserved[2]; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ +# endif + +# endif + +# if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ +# endif + +#endif /* XXH_STATIC_LINKING_ONLY */ + +#if defined(__cplusplus) +} +#endif + +#endif /* XXHASH_H_5627135585666179 */ diff --git a/gfx/angle/checkout/src/common/tls.cpp b/gfx/angle/checkout/src/common/tls.cpp new file mode 100644 index 0000000000..20977f48cf --- /dev/null +++ b/gfx/angle/checkout/src/common/tls.cpp @@ -0,0 +1,157 @@ +// +// Copyright (c) 2014 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// tls.cpp: Simple cross-platform interface for thread local storage. 
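The file below implements the four calls declared in common/tls.h — CreateTLSIndex, DestroyTLSIndex, SetTLSValue and GetTLSValue — on top of TlsAlloc/TlsGetValue on Windows, an emulated slot table for Windows Store, and pthread keys on POSIX. As a minimal usage sketch (not part of the commit; the ScratchBuffer type and function names are hypothetical, and error handling is elided), a caller allocates one process-wide index and then stores a per-thread pointer in it:

// Hypothetical caller of the TLS interface implemented in tls.cpp below.
#include "common/tls.h"

struct ScratchBuffer
{
    char data[256];
};

static TLSIndex g_scratchIndex = TLS_INVALID_INDEX;

void InitScratchTLS()
{
    // One index is shared by all threads; each thread stores its own value in it.
    g_scratchIndex = CreateTLSIndex();
}

ScratchBuffer *GetThreadScratch()
{
    // Lazily create this thread's buffer the first time it is requested.
    ScratchBuffer *buffer = static_cast<ScratchBuffer *>(GetTLSValue(g_scratchIndex));
    if (buffer == nullptr)
    {
        buffer = new ScratchBuffer();
        SetTLSValue(g_scratchIndex, buffer);
    }
    return buffer;
}

void ShutdownScratchTLS()
{
    // Per the TODO in tls.h, no destructor is registered for POSIX keys, so each
    // thread must free its own buffer before the index is destroyed.
    DestroyTLSIndex(g_scratchIndex);
    g_scratchIndex = TLS_INVALID_INDEX;
}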
+ +#include "common/tls.h" + +#include <assert.h> + +#ifdef ANGLE_ENABLE_WINDOWS_STORE +# include <map> +# include <mutex> +# include <set> +# include <vector> + +# include <Windows.System.Threading.h> +# include <wrl/async.h> +# include <wrl/client.h> + +using namespace std; +using namespace Windows::Foundation; +using namespace ABI::Windows::System::Threading; + +// Thread local storage for Windows Store support +typedef vector<void *> ThreadLocalData; + +static __declspec(thread) ThreadLocalData *currentThreadData = nullptr; +static set<ThreadLocalData *> allThreadData; +static DWORD nextTlsIndex = 0; +static vector<DWORD> freeTlsIndices; + +#endif + +TLSIndex CreateTLSIndex() +{ + TLSIndex index; + +#ifdef ANGLE_PLATFORM_WINDOWS +# ifdef ANGLE_ENABLE_WINDOWS_STORE + if (!freeTlsIndices.empty()) + { + DWORD result = freeTlsIndices.back(); + freeTlsIndices.pop_back(); + index = result; + } + else + { + index = nextTlsIndex++; + } +# else + index = TlsAlloc(); +# endif + +#elif defined(ANGLE_PLATFORM_POSIX) + // Create global pool key + if ((pthread_key_create(&index, nullptr)) != 0) + { + index = TLS_INVALID_INDEX; + } +#endif + + assert(index != TLS_INVALID_INDEX && + "CreateTLSIndex(): Unable to allocate Thread Local Storage"); + return index; +} + +bool DestroyTLSIndex(TLSIndex index) +{ + assert(index != TLS_INVALID_INDEX && "DestroyTLSIndex(): Invalid TLS Index"); + if (index == TLS_INVALID_INDEX) + { + return false; + } + +#ifdef ANGLE_PLATFORM_WINDOWS +# ifdef ANGLE_ENABLE_WINDOWS_STORE + assert(index < nextTlsIndex); + assert(find(freeTlsIndices.begin(), freeTlsIndices.end(), index) == freeTlsIndices.end()); + + freeTlsIndices.push_back(index); + for (auto threadData : allThreadData) + { + if (threadData->size() > index) + { + threadData->at(index) = nullptr; + } + } + return true; +# else + return (TlsFree(index) == TRUE); +# endif +#elif defined(ANGLE_PLATFORM_POSIX) + return (pthread_key_delete(index) == 0); +#endif +} + +bool SetTLSValue(TLSIndex index, void *value) +{ + assert(index != TLS_INVALID_INDEX && "SetTLSValue(): Invalid TLS Index"); + if (index == TLS_INVALID_INDEX) + { + return false; + } + +#ifdef ANGLE_PLATFORM_WINDOWS +# ifdef ANGLE_ENABLE_WINDOWS_STORE + ThreadLocalData *threadData = currentThreadData; + if (!threadData) + { + threadData = new ThreadLocalData(index + 1, nullptr); + allThreadData.insert(threadData); + currentThreadData = threadData; + } + else if (threadData->size() <= index) + { + threadData->resize(index + 1, nullptr); + } + + threadData->at(index) = value; + return true; +# else + return (TlsSetValue(index, value) == TRUE); +# endif +#elif defined(ANGLE_PLATFORM_POSIX) + return (pthread_setspecific(index, value) == 0); +#endif +} + +void *GetTLSValue(TLSIndex index) +{ + assert(index != TLS_INVALID_INDEX && "GetTLSValue(): Invalid TLS Index"); + if (index == TLS_INVALID_INDEX) + { + return nullptr; + } + +#ifdef ANGLE_PLATFORM_WINDOWS +# ifdef ANGLE_ENABLE_WINDOWS_STORE + ThreadLocalData *threadData = currentThreadData; + if (threadData && threadData->size() > index) + { + return threadData->at(index); + } + else + { + return nullptr; + } +# else + return TlsGetValue(index); +# endif +#elif defined(ANGLE_PLATFORM_POSIX) + return pthread_getspecific(index); +#endif +} diff --git a/gfx/angle/checkout/src/common/tls.h b/gfx/angle/checkout/src/common/tls.h new file mode 100644 index 0000000000..33c776adc5 --- /dev/null +++ b/gfx/angle/checkout/src/common/tls.h @@ -0,0 +1,46 @@ +// +// Copyright (c) 2014 The ANGLE Project Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// tls.h: Simple cross-platform interface for thread local storage. + +#ifndef COMMON_TLS_H_ +#define COMMON_TLS_H_ + +#include "common/platform.h" + +#ifdef ANGLE_PLATFORM_WINDOWS + +// TLS does not exist for Windows Store and needs to be emulated +# ifdef ANGLE_ENABLE_WINDOWS_STORE +# ifndef TLS_OUT_OF_INDEXES +# define TLS_OUT_OF_INDEXES static_cast<DWORD>(0xFFFFFFFF) +# endif +# ifndef CREATE_SUSPENDED +# define CREATE_SUSPENDED 0x00000004 +# endif +# endif +typedef DWORD TLSIndex; +# define TLS_INVALID_INDEX (TLS_OUT_OF_INDEXES) +#elif defined(ANGLE_PLATFORM_POSIX) +# include <errno.h> +# include <pthread.h> +# include <semaphore.h> +typedef pthread_key_t TLSIndex; +# define TLS_INVALID_INDEX (static_cast<TLSIndex>(-1)) +#else +# error Unsupported platform. +#endif + +// TODO(kbr): for POSIX platforms this will have to be changed to take +// in a destructor function pointer, to allow the thread-local storage +// to be properly deallocated upon thread exit. +TLSIndex CreateTLSIndex(); +bool DestroyTLSIndex(TLSIndex index); + +bool SetTLSValue(TLSIndex index, void *value); +void *GetTLSValue(TLSIndex index); + +#endif // COMMON_TLS_H_ diff --git a/gfx/angle/checkout/src/common/uniform_type_info_autogen.cpp b/gfx/angle/checkout/src/common/uniform_type_info_autogen.cpp new file mode 100644 index 0000000000..58a67fe475 --- /dev/null +++ b/gfx/angle/checkout/src/common/uniform_type_info_autogen.cpp @@ -0,0 +1,318 @@ +// GENERATED FILE - DO NOT EDIT. +// Generated by gen_uniform_type_table.py. +// +// Copyright 2019 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Uniform type info table: +// Metadata about a particular uniform format, indexed by GL type. 
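The generated table that follows pairs each GL uniform type enum with its metadata and is addressed through GetTypeInfoIndex, a switch that maps the sparse GLenum values onto dense indices of a constexpr array. As a compact illustration of that table-plus-switch pattern (not part of the commit; the struct, field names, and values below are hypothetical stand-ins, far smaller than the real UniformTypeInfo presumably declared in the included common/utilities.h):

// Hypothetical miniature of the lookup scheme used in this file.
#include <array>
#include <cstddef>

struct ToyTypeInfo
{
    unsigned int glType;   // the GL enum this row describes
    int componentCount;    // scalar components per element
    size_t componentSize;  // bytes per component
};

constexpr std::array<ToyTypeInfo, 3> kToyTable = {{
    {0x0000 /* GL_NONE */, 0, 0},
    {0x1406 /* GL_FLOAT */, 1, 4},
    {0x8B52 /* GL_FLOAT_VEC4 */, 4, 4},
}};

constexpr size_t ToyTypeIndex(unsigned int glType)
{
    // Sparse enum values collapse onto dense array indices; unknown types map to the GL_NONE row.
    switch (glType)
    {
        case 0x1406:
            return 1;
        case 0x8B52:
            return 2;
        default:
            return 0;
    }
}

// Usage: the byte size of one GL_FLOAT_VEC4 element, computed at compile time.
constexpr size_t kVec4Bytes = kToyTable[ToyTypeIndex(0x8B52)].componentCount *
                              kToyTable[ToyTypeIndex(0x8B52)].componentSize;
static_assert(kVec4Bytes == 16, "four 4-byte floats");

The switch keeps lookups O(1) without requiring the table to be indexed directly by GLenum, whose numeric values are far apart.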
+ +#include <array> +#include "common/utilities.h" + +using namespace angle; + +namespace gl +{ + +namespace +{ +constexpr std::array<UniformTypeInfo, 62> kInfoTable = { + {{GL_NONE, GL_NONE, GL_NONE, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 0, 0, 0, 0, 0 * 0, + 0 * 0, false, false, false, ""}, + {GL_BOOL, GL_BOOL, GL_NONE, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1, 1, 1, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, false, ""}, + {GL_BOOL_VEC2, GL_BOOL, GL_NONE, GL_NONE, GL_BOOL_VEC2, SamplerFormat::InvalidEnum, 1, 2, 2, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 2, false, false, false, ""}, + {GL_BOOL_VEC3, GL_BOOL, GL_NONE, GL_NONE, GL_BOOL_VEC3, SamplerFormat::InvalidEnum, 1, 3, 3, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 3, false, false, false, ""}, + {GL_BOOL_VEC4, GL_BOOL, GL_NONE, GL_NONE, GL_BOOL_VEC4, SamplerFormat::InvalidEnum, 1, 4, 4, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 4, false, false, false, ""}, + {GL_FLOAT, GL_FLOAT, GL_NONE, GL_NONE, GL_BOOL, SamplerFormat::InvalidEnum, 1, 1, 1, + sizeof(GLfloat), sizeof(GLfloat) * 4, sizeof(GLfloat) * 1, false, false, false, ""}, + {GL_FLOAT_MAT2, GL_FLOAT, GL_NONE, GL_FLOAT_MAT2, GL_NONE, SamplerFormat::InvalidEnum, 2, 2, 4, + sizeof(GLfloat), sizeof(GLfloat) * 8, sizeof(GLfloat) * 4, false, true, false, ""}, + {GL_FLOAT_MAT2x3, GL_FLOAT, GL_NONE, GL_FLOAT_MAT3x2, GL_NONE, SamplerFormat::InvalidEnum, 3, + 2, 6, sizeof(GLfloat), sizeof(GLfloat) * 12, sizeof(GLfloat) * 6, false, true, false, ""}, + {GL_FLOAT_MAT2x4, GL_FLOAT, GL_NONE, GL_FLOAT_MAT4x2, GL_NONE, SamplerFormat::InvalidEnum, 4, + 2, 8, sizeof(GLfloat), sizeof(GLfloat) * 16, sizeof(GLfloat) * 8, false, true, false, ""}, + {GL_FLOAT_MAT3, GL_FLOAT, GL_NONE, GL_FLOAT_MAT3, GL_NONE, SamplerFormat::InvalidEnum, 3, 3, 9, + sizeof(GLfloat), sizeof(GLfloat) * 12, sizeof(GLfloat) * 9, false, true, false, ""}, + {GL_FLOAT_MAT3x2, GL_FLOAT, GL_NONE, GL_FLOAT_MAT2x3, GL_NONE, SamplerFormat::InvalidEnum, 2, + 3, 6, sizeof(GLfloat), sizeof(GLfloat) * 8, sizeof(GLfloat) * 6, false, true, false, ""}, + {GL_FLOAT_MAT3x4, GL_FLOAT, GL_NONE, GL_FLOAT_MAT4x3, GL_NONE, SamplerFormat::InvalidEnum, 4, + 3, 12, sizeof(GLfloat), sizeof(GLfloat) * 16, sizeof(GLfloat) * 12, false, true, false, ""}, + {GL_FLOAT_MAT4, GL_FLOAT, GL_NONE, GL_FLOAT_MAT4, GL_NONE, SamplerFormat::InvalidEnum, 4, 4, + 16, sizeof(GLfloat), sizeof(GLfloat) * 16, sizeof(GLfloat) * 16, false, true, false, ""}, + {GL_FLOAT_MAT4x2, GL_FLOAT, GL_NONE, GL_FLOAT_MAT2x4, GL_NONE, SamplerFormat::InvalidEnum, 2, + 4, 8, sizeof(GLfloat), sizeof(GLfloat) * 8, sizeof(GLfloat) * 8, false, true, false, ""}, + {GL_FLOAT_MAT4x3, GL_FLOAT, GL_NONE, GL_FLOAT_MAT3x4, GL_NONE, SamplerFormat::InvalidEnum, 3, + 4, 12, sizeof(GLfloat), sizeof(GLfloat) * 12, sizeof(GLfloat) * 12, false, true, false, ""}, + {GL_FLOAT_VEC2, GL_FLOAT, GL_NONE, GL_NONE, GL_BOOL_VEC2, SamplerFormat::InvalidEnum, 1, 2, 2, + sizeof(GLfloat), sizeof(GLfloat) * 4, sizeof(GLfloat) * 2, false, false, false, ""}, + {GL_FLOAT_VEC3, GL_FLOAT, GL_NONE, GL_NONE, GL_BOOL_VEC3, SamplerFormat::InvalidEnum, 1, 3, 3, + sizeof(GLfloat), sizeof(GLfloat) * 4, sizeof(GLfloat) * 3, false, false, false, ""}, + {GL_FLOAT_VEC4, GL_FLOAT, GL_NONE, GL_NONE, GL_BOOL_VEC4, SamplerFormat::InvalidEnum, 1, 4, 4, + sizeof(GLfloat), sizeof(GLfloat) * 4, sizeof(GLfloat) * 4, false, false, false, ""}, + {GL_IMAGE_2D, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1, 1, 1, + sizeof(GLint), sizeof(GLint) * 4, 
sizeof(GLint) * 1, false, false, true, "intBitsToFloat"}, + {GL_IMAGE_2D_ARRAY, GL_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, + 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true, + "intBitsToFloat"}, + {GL_IMAGE_3D, GL_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1, 1, 1, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true, "intBitsToFloat"}, + {GL_IMAGE_CUBE, GL_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1, + 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true, + "intBitsToFloat"}, + {GL_INT, GL_INT, GL_NONE, GL_NONE, GL_BOOL, SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLint), + sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, false, "intBitsToFloat"}, + {GL_INT_IMAGE_2D, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1, 1, 1, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true, "intBitsToFloat"}, + {GL_INT_IMAGE_2D_ARRAY, GL_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE, + SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, + false, false, true, "intBitsToFloat"}, + {GL_INT_IMAGE_3D, GL_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, 1, 1, 1, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true, "intBitsToFloat"}, + {GL_INT_IMAGE_CUBE, GL_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, SamplerFormat::InvalidEnum, + 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, false, false, true, + "intBitsToFloat"}, + {GL_INT_SAMPLER_2D, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::Signed, 1, 1, 1, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false, "intBitsToFloat"}, + {GL_INT_SAMPLER_2D_ARRAY, GL_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE, SamplerFormat::Signed, + 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false, + "intBitsToFloat"}, + {GL_INT_SAMPLER_2D_MULTISAMPLE, GL_INT, GL_TEXTURE_2D_MULTISAMPLE, GL_NONE, GL_NONE, + SamplerFormat::Signed, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, + false, false, "intBitsToFloat"}, + {GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY, GL_INT, GL_TEXTURE_2D_MULTISAMPLE_ARRAY, GL_NONE, + GL_NONE, SamplerFormat::Signed, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, + true, false, false, "intBitsToFloat"}, + {GL_INT_SAMPLER_3D, GL_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE, SamplerFormat::Signed, 1, 1, 1, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false, "intBitsToFloat"}, + {GL_INT_SAMPLER_CUBE, GL_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, SamplerFormat::Signed, 1, + 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false, + "intBitsToFloat"}, + {GL_INT_VEC2, GL_INT, GL_NONE, GL_NONE, GL_BOOL_VEC2, SamplerFormat::InvalidEnum, 1, 2, 2, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 2, false, false, false, "intBitsToFloat"}, + {GL_INT_VEC3, GL_INT, GL_NONE, GL_NONE, GL_BOOL_VEC3, SamplerFormat::InvalidEnum, 1, 3, 3, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 3, false, false, false, "intBitsToFloat"}, + {GL_INT_VEC4, GL_INT, GL_NONE, GL_NONE, GL_BOOL_VEC4, SamplerFormat::InvalidEnum, 1, 4, 4, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 4, false, false, false, "intBitsToFloat"}, + {GL_SAMPLER_2D, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::Float, 1, 1, 1, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, 
false, false, "intBitsToFloat"}, + {GL_SAMPLER_2D_ARRAY, GL_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE, SamplerFormat::Float, 1, + 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false, + "intBitsToFloat"}, + {GL_SAMPLER_2D_ARRAY_SHADOW, GL_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE, + SamplerFormat::Shadow, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, + false, false, "intBitsToFloat"}, + {GL_SAMPLER_2D_MULTISAMPLE, GL_INT, GL_TEXTURE_2D_MULTISAMPLE, GL_NONE, GL_NONE, + SamplerFormat::Float, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, + false, false, "intBitsToFloat"}, + {GL_SAMPLER_2D_MULTISAMPLE_ARRAY, GL_INT, GL_TEXTURE_2D_MULTISAMPLE_ARRAY, GL_NONE, GL_NONE, + SamplerFormat::Float, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, + false, false, "intBitsToFloat"}, + {GL_SAMPLER_2D_RECT_ANGLE, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::Float, 1, 1, + 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false, "intBitsToFloat"}, + {GL_SAMPLER_2D_SHADOW, GL_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, SamplerFormat::Shadow, 1, 1, 1, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false, "intBitsToFloat"}, + {GL_SAMPLER_3D, GL_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE, SamplerFormat::Float, 1, 1, 1, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false, "intBitsToFloat"}, + {GL_SAMPLER_CUBE, GL_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, SamplerFormat::Float, 1, 1, 1, + sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false, "intBitsToFloat"}, + {GL_SAMPLER_CUBE_SHADOW, GL_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, SamplerFormat::Shadow, + 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, false, false, + "intBitsToFloat"}, + {GL_SAMPLER_EXTERNAL_OES, GL_INT, GL_TEXTURE_EXTERNAL_OES, GL_NONE, GL_NONE, + SamplerFormat::Float, 1, 1, 1, sizeof(GLint), sizeof(GLint) * 4, sizeof(GLint) * 1, true, + false, false, "intBitsToFloat"}, + {GL_UNSIGNED_INT, GL_UNSIGNED_INT, GL_NONE, GL_NONE, GL_BOOL, SamplerFormat::InvalidEnum, 1, 1, + 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, false, false, false, + "uintBitsToFloat"}, + {GL_UNSIGNED_INT_ATOMIC_COUNTER, GL_UNSIGNED_INT, GL_NONE, GL_NONE, GL_NONE, + SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, + false, false, false, "uintBitsToFloat"}, + {GL_UNSIGNED_INT_IMAGE_2D, GL_UNSIGNED_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, + SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, + false, false, true, "uintBitsToFloat"}, + {GL_UNSIGNED_INT_IMAGE_2D_ARRAY, GL_UNSIGNED_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE, + SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, + false, false, true, "uintBitsToFloat"}, + {GL_UNSIGNED_INT_IMAGE_3D, GL_UNSIGNED_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE, + SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, + false, false, true, "uintBitsToFloat"}, + {GL_UNSIGNED_INT_IMAGE_CUBE, GL_UNSIGNED_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, + SamplerFormat::InvalidEnum, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, + false, false, true, "uintBitsToFloat"}, + {GL_UNSIGNED_INT_SAMPLER_2D, GL_UNSIGNED_INT, GL_TEXTURE_2D, GL_NONE, GL_NONE, + SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, + true, false, false, 
"uintBitsToFloat"}, + {GL_UNSIGNED_INT_SAMPLER_2D_ARRAY, GL_UNSIGNED_INT, GL_TEXTURE_2D_ARRAY, GL_NONE, GL_NONE, + SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, + true, false, false, "uintBitsToFloat"}, + {GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE, GL_UNSIGNED_INT, GL_TEXTURE_2D_MULTISAMPLE, GL_NONE, + GL_NONE, SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, + sizeof(GLuint) * 1, true, false, false, "uintBitsToFloat"}, + {GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY, GL_UNSIGNED_INT, + GL_TEXTURE_2D_MULTISAMPLE_ARRAY, GL_NONE, GL_NONE, SamplerFormat::Unsigned, 1, 1, 1, + sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, true, false, false, + "uintBitsToFloat"}, + {GL_UNSIGNED_INT_SAMPLER_3D, GL_UNSIGNED_INT, GL_TEXTURE_3D, GL_NONE, GL_NONE, + SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, + true, false, false, "uintBitsToFloat"}, + {GL_UNSIGNED_INT_SAMPLER_CUBE, GL_UNSIGNED_INT, GL_TEXTURE_CUBE_MAP, GL_NONE, GL_NONE, + SamplerFormat::Unsigned, 1, 1, 1, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 1, + true, false, false, "uintBitsToFloat"}, + {GL_UNSIGNED_INT_VEC2, GL_UNSIGNED_INT, GL_NONE, GL_NONE, GL_BOOL_VEC2, + SamplerFormat::InvalidEnum, 1, 2, 2, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 2, + false, false, false, "uintBitsToFloat"}, + {GL_UNSIGNED_INT_VEC3, GL_UNSIGNED_INT, GL_NONE, GL_NONE, GL_BOOL_VEC3, + SamplerFormat::InvalidEnum, 1, 3, 3, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 3, + false, false, false, "uintBitsToFloat"}, + {GL_UNSIGNED_INT_VEC4, GL_UNSIGNED_INT, GL_NONE, GL_NONE, GL_BOOL_VEC4, + SamplerFormat::InvalidEnum, 1, 4, 4, sizeof(GLuint), sizeof(GLuint) * 4, sizeof(GLuint) * 4, + false, false, false, "uintBitsToFloat"}}}; + +size_t GetTypeInfoIndex(GLenum uniformType) +{ + switch (uniformType) + { + case GL_NONE: + return 0; + case GL_BOOL: + return 1; + case GL_BOOL_VEC2: + return 2; + case GL_BOOL_VEC3: + return 3; + case GL_BOOL_VEC4: + return 4; + case GL_FLOAT: + return 5; + case GL_FLOAT_MAT2: + return 6; + case GL_FLOAT_MAT2x3: + return 7; + case GL_FLOAT_MAT2x4: + return 8; + case GL_FLOAT_MAT3: + return 9; + case GL_FLOAT_MAT3x2: + return 10; + case GL_FLOAT_MAT3x4: + return 11; + case GL_FLOAT_MAT4: + return 12; + case GL_FLOAT_MAT4x2: + return 13; + case GL_FLOAT_MAT4x3: + return 14; + case GL_FLOAT_VEC2: + return 15; + case GL_FLOAT_VEC3: + return 16; + case GL_FLOAT_VEC4: + return 17; + case GL_IMAGE_2D: + return 18; + case GL_IMAGE_2D_ARRAY: + return 19; + case GL_IMAGE_3D: + return 20; + case GL_IMAGE_CUBE: + return 21; + case GL_INT: + return 22; + case GL_INT_IMAGE_2D: + return 23; + case GL_INT_IMAGE_2D_ARRAY: + return 24; + case GL_INT_IMAGE_3D: + return 25; + case GL_INT_IMAGE_CUBE: + return 26; + case GL_INT_SAMPLER_2D: + return 27; + case GL_INT_SAMPLER_2D_ARRAY: + return 28; + case GL_INT_SAMPLER_2D_MULTISAMPLE: + return 29; + case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + return 30; + case GL_INT_SAMPLER_3D: + return 31; + case GL_INT_SAMPLER_CUBE: + return 32; + case GL_INT_VEC2: + return 33; + case GL_INT_VEC3: + return 34; + case GL_INT_VEC4: + return 35; + case GL_SAMPLER_2D: + return 36; + case GL_SAMPLER_2D_ARRAY: + return 37; + case GL_SAMPLER_2D_ARRAY_SHADOW: + return 38; + case GL_SAMPLER_2D_MULTISAMPLE: + return 39; + case GL_SAMPLER_2D_MULTISAMPLE_ARRAY: + return 40; + case GL_SAMPLER_2D_RECT_ANGLE: + return 41; + case GL_SAMPLER_2D_SHADOW: + return 42; + case GL_SAMPLER_3D: + return 43; + case 
GL_SAMPLER_CUBE: + return 44; + case GL_SAMPLER_CUBE_SHADOW: + return 45; + case GL_SAMPLER_EXTERNAL_OES: + return 46; + case GL_UNSIGNED_INT: + return 47; + case GL_UNSIGNED_INT_ATOMIC_COUNTER: + return 48; + case GL_UNSIGNED_INT_IMAGE_2D: + return 49; + case GL_UNSIGNED_INT_IMAGE_2D_ARRAY: + return 50; + case GL_UNSIGNED_INT_IMAGE_3D: + return 51; + case GL_UNSIGNED_INT_IMAGE_CUBE: + return 52; + case GL_UNSIGNED_INT_SAMPLER_2D: + return 53; + case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY: + return 54; + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE: + return 55; + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + return 56; + case GL_UNSIGNED_INT_SAMPLER_3D: + return 57; + case GL_UNSIGNED_INT_SAMPLER_CUBE: + return 58; + case GL_UNSIGNED_INT_VEC2: + return 59; + case GL_UNSIGNED_INT_VEC3: + return 60; + case GL_UNSIGNED_INT_VEC4: + return 61; + default: + UNREACHABLE(); + return 0; + } +} +} // anonymous namespace + +const UniformTypeInfo &GetUniformTypeInfo(GLenum uniformType) +{ + ASSERT(kInfoTable[GetTypeInfoIndex(uniformType)].type == uniformType); + return kInfoTable[GetTypeInfoIndex(uniformType)]; +} + +} // namespace gl diff --git a/gfx/angle/checkout/src/common/utilities.cpp b/gfx/angle/checkout/src/common/utilities.cpp new file mode 100644 index 0000000000..ea18cb356d --- /dev/null +++ b/gfx/angle/checkout/src/common/utilities.cpp @@ -0,0 +1,1110 @@ +// +// Copyright (c) 2002-2013 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// utilities.cpp: Conversion functions and other utility routines. + +#include "common/utilities.h" +#include <GLSLANG/ShaderVars.h> +#include "common/mathutil.h" +#include "common/platform.h" + +#include <set> + +#if defined(ANGLE_ENABLE_WINDOWS_STORE) +# include <windows.applicationmodel.core.h> +# include <windows.graphics.display.h> +# include <wrl.h> +# include <wrl/wrappers/corewrappers.h> +#endif + +namespace +{ + +template <class IndexType> +gl::IndexRange ComputeTypedIndexRange(const IndexType *indices, + size_t count, + bool primitiveRestartEnabled, + GLuint primitiveRestartIndex) +{ + ASSERT(count > 0); + + IndexType minIndex = 0; + IndexType maxIndex = 0; + size_t nonPrimitiveRestartIndices = 0; + + if (primitiveRestartEnabled) + { + // Find the first non-primitive restart index to initialize the min and max values + size_t i = 0; + for (; i < count; i++) + { + if (indices[i] != primitiveRestartIndex) + { + minIndex = indices[i]; + maxIndex = indices[i]; + nonPrimitiveRestartIndices++; + break; + } + } + + // Loop over the rest of the indices + for (; i < count; i++) + { + if (indices[i] != primitiveRestartIndex) + { + if (minIndex > indices[i]) + { + minIndex = indices[i]; + } + if (maxIndex < indices[i]) + { + maxIndex = indices[i]; + } + nonPrimitiveRestartIndices++; + } + } + } + else + { + minIndex = indices[0]; + maxIndex = indices[0]; + nonPrimitiveRestartIndices = count; + + for (size_t i = 1; i < count; i++) + { + if (minIndex > indices[i]) + { + minIndex = indices[i]; + } + if (maxIndex < indices[i]) + { + maxIndex = indices[i]; + } + } + } + + return gl::IndexRange(static_cast<size_t>(minIndex), static_cast<size_t>(maxIndex), + nonPrimitiveRestartIndices); +} + +} // anonymous namespace + +namespace gl +{ + +int VariableComponentCount(GLenum type) +{ + return VariableRowCount(type) * VariableColumnCount(type); +} + +GLenum VariableComponentType(GLenum type) +{ + switch (type) + { + case GL_BOOL: + case GL_BOOL_VEC2: + case 
GL_BOOL_VEC3: + case GL_BOOL_VEC4: + return GL_BOOL; + case GL_FLOAT: + case GL_FLOAT_VEC2: + case GL_FLOAT_VEC3: + case GL_FLOAT_VEC4: + case GL_FLOAT_MAT2: + case GL_FLOAT_MAT3: + case GL_FLOAT_MAT4: + case GL_FLOAT_MAT2x3: + case GL_FLOAT_MAT3x2: + case GL_FLOAT_MAT2x4: + case GL_FLOAT_MAT4x2: + case GL_FLOAT_MAT3x4: + case GL_FLOAT_MAT4x3: + return GL_FLOAT; + case GL_INT: + case GL_SAMPLER_2D: + case GL_SAMPLER_2D_RECT_ANGLE: + case GL_SAMPLER_3D: + case GL_SAMPLER_CUBE: + case GL_SAMPLER_2D_ARRAY: + case GL_SAMPLER_EXTERNAL_OES: + case GL_SAMPLER_2D_MULTISAMPLE: + case GL_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_INT_SAMPLER_2D: + case GL_INT_SAMPLER_3D: + case GL_INT_SAMPLER_CUBE: + case GL_INT_SAMPLER_2D_ARRAY: + case GL_INT_SAMPLER_2D_MULTISAMPLE: + case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_UNSIGNED_INT_SAMPLER_2D: + case GL_UNSIGNED_INT_SAMPLER_3D: + case GL_UNSIGNED_INT_SAMPLER_CUBE: + case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_SAMPLER_2D_SHADOW: + case GL_SAMPLER_CUBE_SHADOW: + case GL_SAMPLER_2D_ARRAY_SHADOW: + case GL_INT_VEC2: + case GL_INT_VEC3: + case GL_INT_VEC4: + case GL_IMAGE_2D: + case GL_INT_IMAGE_2D: + case GL_UNSIGNED_INT_IMAGE_2D: + case GL_IMAGE_3D: + case GL_INT_IMAGE_3D: + case GL_UNSIGNED_INT_IMAGE_3D: + case GL_IMAGE_2D_ARRAY: + case GL_INT_IMAGE_2D_ARRAY: + case GL_UNSIGNED_INT_IMAGE_2D_ARRAY: + case GL_IMAGE_CUBE: + case GL_INT_IMAGE_CUBE: + case GL_UNSIGNED_INT_IMAGE_CUBE: + case GL_UNSIGNED_INT_ATOMIC_COUNTER: + return GL_INT; + case GL_UNSIGNED_INT: + case GL_UNSIGNED_INT_VEC2: + case GL_UNSIGNED_INT_VEC3: + case GL_UNSIGNED_INT_VEC4: + return GL_UNSIGNED_INT; + default: + UNREACHABLE(); + } + + return GL_NONE; +} + +size_t VariableComponentSize(GLenum type) +{ + switch (type) + { + case GL_BOOL: + return sizeof(GLint); + case GL_FLOAT: + return sizeof(GLfloat); + case GL_INT: + return sizeof(GLint); + case GL_UNSIGNED_INT: + return sizeof(GLuint); + default: + UNREACHABLE(); + } + + return 0; +} + +size_t VariableInternalSize(GLenum type) +{ + // Expanded to 4-element vectors + return VariableComponentSize(VariableComponentType(type)) * VariableRowCount(type) * 4; +} + +size_t VariableExternalSize(GLenum type) +{ + return VariableComponentSize(VariableComponentType(type)) * VariableComponentCount(type); +} + +GLenum VariableBoolVectorType(GLenum type) +{ + switch (type) + { + case GL_FLOAT: + case GL_INT: + case GL_UNSIGNED_INT: + return GL_BOOL; + case GL_FLOAT_VEC2: + case GL_INT_VEC2: + case GL_UNSIGNED_INT_VEC2: + return GL_BOOL_VEC2; + case GL_FLOAT_VEC3: + case GL_INT_VEC3: + case GL_UNSIGNED_INT_VEC3: + return GL_BOOL_VEC3; + case GL_FLOAT_VEC4: + case GL_INT_VEC4: + case GL_UNSIGNED_INT_VEC4: + return GL_BOOL_VEC4; + + default: + UNREACHABLE(); + return GL_NONE; + } +} + +int VariableRowCount(GLenum type) +{ + switch (type) + { + case GL_NONE: + return 0; + case GL_BOOL: + case GL_FLOAT: + case GL_INT: + case GL_UNSIGNED_INT: + case GL_BOOL_VEC2: + case GL_FLOAT_VEC2: + case GL_INT_VEC2: + case GL_UNSIGNED_INT_VEC2: + case GL_BOOL_VEC3: + case GL_FLOAT_VEC3: + case GL_INT_VEC3: + case GL_UNSIGNED_INT_VEC3: + case GL_BOOL_VEC4: + case GL_FLOAT_VEC4: + case GL_INT_VEC4: + case GL_UNSIGNED_INT_VEC4: + case GL_SAMPLER_2D: + case GL_SAMPLER_3D: + case GL_SAMPLER_CUBE: + case GL_SAMPLER_2D_ARRAY: + case GL_SAMPLER_EXTERNAL_OES: + case GL_SAMPLER_2D_RECT_ANGLE: + case GL_SAMPLER_2D_MULTISAMPLE: + case GL_SAMPLER_2D_MULTISAMPLE_ARRAY: + case 
GL_INT_SAMPLER_2D: + case GL_INT_SAMPLER_3D: + case GL_INT_SAMPLER_CUBE: + case GL_INT_SAMPLER_2D_ARRAY: + case GL_INT_SAMPLER_2D_MULTISAMPLE: + case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_UNSIGNED_INT_SAMPLER_2D: + case GL_UNSIGNED_INT_SAMPLER_3D: + case GL_UNSIGNED_INT_SAMPLER_CUBE: + case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_SAMPLER_2D_SHADOW: + case GL_SAMPLER_CUBE_SHADOW: + case GL_SAMPLER_2D_ARRAY_SHADOW: + case GL_IMAGE_2D: + case GL_INT_IMAGE_2D: + case GL_UNSIGNED_INT_IMAGE_2D: + case GL_IMAGE_2D_ARRAY: + case GL_INT_IMAGE_2D_ARRAY: + case GL_UNSIGNED_INT_IMAGE_2D_ARRAY: + case GL_IMAGE_3D: + case GL_INT_IMAGE_3D: + case GL_UNSIGNED_INT_IMAGE_3D: + case GL_IMAGE_CUBE: + case GL_INT_IMAGE_CUBE: + case GL_UNSIGNED_INT_IMAGE_CUBE: + case GL_UNSIGNED_INT_ATOMIC_COUNTER: + return 1; + case GL_FLOAT_MAT2: + case GL_FLOAT_MAT3x2: + case GL_FLOAT_MAT4x2: + return 2; + case GL_FLOAT_MAT3: + case GL_FLOAT_MAT2x3: + case GL_FLOAT_MAT4x3: + return 3; + case GL_FLOAT_MAT4: + case GL_FLOAT_MAT2x4: + case GL_FLOAT_MAT3x4: + return 4; + default: + UNREACHABLE(); + } + + return 0; +} + +int VariableColumnCount(GLenum type) +{ + switch (type) + { + case GL_NONE: + return 0; + case GL_BOOL: + case GL_FLOAT: + case GL_INT: + case GL_UNSIGNED_INT: + case GL_SAMPLER_2D: + case GL_SAMPLER_3D: + case GL_SAMPLER_CUBE: + case GL_SAMPLER_2D_ARRAY: + case GL_SAMPLER_2D_MULTISAMPLE: + case GL_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_INT_SAMPLER_2D: + case GL_INT_SAMPLER_3D: + case GL_INT_SAMPLER_CUBE: + case GL_INT_SAMPLER_2D_ARRAY: + case GL_INT_SAMPLER_2D_MULTISAMPLE: + case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_SAMPLER_EXTERNAL_OES: + case GL_SAMPLER_2D_RECT_ANGLE: + case GL_UNSIGNED_INT_SAMPLER_2D: + case GL_UNSIGNED_INT_SAMPLER_3D: + case GL_UNSIGNED_INT_SAMPLER_CUBE: + case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_SAMPLER_2D_SHADOW: + case GL_SAMPLER_CUBE_SHADOW: + case GL_SAMPLER_2D_ARRAY_SHADOW: + case GL_IMAGE_2D: + case GL_INT_IMAGE_2D: + case GL_UNSIGNED_INT_IMAGE_2D: + case GL_IMAGE_3D: + case GL_INT_IMAGE_3D: + case GL_UNSIGNED_INT_IMAGE_3D: + case GL_IMAGE_2D_ARRAY: + case GL_INT_IMAGE_2D_ARRAY: + case GL_UNSIGNED_INT_IMAGE_2D_ARRAY: + case GL_IMAGE_CUBE: + case GL_INT_IMAGE_CUBE: + case GL_UNSIGNED_INT_IMAGE_CUBE: + case GL_UNSIGNED_INT_ATOMIC_COUNTER: + return 1; + case GL_BOOL_VEC2: + case GL_FLOAT_VEC2: + case GL_INT_VEC2: + case GL_UNSIGNED_INT_VEC2: + case GL_FLOAT_MAT2: + case GL_FLOAT_MAT2x3: + case GL_FLOAT_MAT2x4: + return 2; + case GL_BOOL_VEC3: + case GL_FLOAT_VEC3: + case GL_INT_VEC3: + case GL_UNSIGNED_INT_VEC3: + case GL_FLOAT_MAT3: + case GL_FLOAT_MAT3x2: + case GL_FLOAT_MAT3x4: + return 3; + case GL_BOOL_VEC4: + case GL_FLOAT_VEC4: + case GL_INT_VEC4: + case GL_UNSIGNED_INT_VEC4: + case GL_FLOAT_MAT4: + case GL_FLOAT_MAT4x2: + case GL_FLOAT_MAT4x3: + return 4; + default: + UNREACHABLE(); + } + + return 0; +} + +bool IsSamplerType(GLenum type) +{ + switch (type) + { + case GL_SAMPLER_2D: + case GL_SAMPLER_3D: + case GL_SAMPLER_CUBE: + case GL_SAMPLER_2D_ARRAY: + case GL_SAMPLER_EXTERNAL_OES: + case GL_SAMPLER_2D_MULTISAMPLE: + case GL_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_SAMPLER_2D_RECT_ANGLE: + case GL_INT_SAMPLER_2D: + case GL_INT_SAMPLER_3D: + case GL_INT_SAMPLER_CUBE: + case GL_INT_SAMPLER_2D_ARRAY: + case GL_INT_SAMPLER_2D_MULTISAMPLE: + case 
GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_UNSIGNED_INT_SAMPLER_2D: + case GL_UNSIGNED_INT_SAMPLER_3D: + case GL_UNSIGNED_INT_SAMPLER_CUBE: + case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_SAMPLER_2D_SHADOW: + case GL_SAMPLER_CUBE_SHADOW: + case GL_SAMPLER_2D_ARRAY_SHADOW: + return true; + } + + return false; +} + +bool IsImageType(GLenum type) +{ + switch (type) + { + case GL_IMAGE_2D: + case GL_INT_IMAGE_2D: + case GL_UNSIGNED_INT_IMAGE_2D: + case GL_IMAGE_3D: + case GL_INT_IMAGE_3D: + case GL_UNSIGNED_INT_IMAGE_3D: + case GL_IMAGE_2D_ARRAY: + case GL_INT_IMAGE_2D_ARRAY: + case GL_UNSIGNED_INT_IMAGE_2D_ARRAY: + case GL_IMAGE_CUBE: + case GL_INT_IMAGE_CUBE: + case GL_UNSIGNED_INT_IMAGE_CUBE: + return true; + } + return false; +} + +bool IsImage2DType(GLenum type) +{ + switch (type) + { + case GL_IMAGE_2D: + case GL_INT_IMAGE_2D: + case GL_UNSIGNED_INT_IMAGE_2D: + return true; + case GL_IMAGE_3D: + case GL_INT_IMAGE_3D: + case GL_UNSIGNED_INT_IMAGE_3D: + case GL_IMAGE_2D_ARRAY: + case GL_INT_IMAGE_2D_ARRAY: + case GL_UNSIGNED_INT_IMAGE_2D_ARRAY: + case GL_IMAGE_CUBE: + case GL_INT_IMAGE_CUBE: + case GL_UNSIGNED_INT_IMAGE_CUBE: + return false; + default: + UNREACHABLE(); + return false; + } +} + +bool IsAtomicCounterType(GLenum type) +{ + return type == GL_UNSIGNED_INT_ATOMIC_COUNTER; +} + +bool IsOpaqueType(GLenum type) +{ + // ESSL 3.10 section 4.1.7 defines opaque types as: samplers, images and atomic counters. + return IsImageType(type) || IsSamplerType(type) || IsAtomicCounterType(type); +} + +bool IsMatrixType(GLenum type) +{ + return VariableRowCount(type) > 1; +} + +GLenum TransposeMatrixType(GLenum type) +{ + if (!IsMatrixType(type)) + { + return type; + } + + switch (type) + { + case GL_FLOAT_MAT2: + return GL_FLOAT_MAT2; + case GL_FLOAT_MAT3: + return GL_FLOAT_MAT3; + case GL_FLOAT_MAT4: + return GL_FLOAT_MAT4; + case GL_FLOAT_MAT2x3: + return GL_FLOAT_MAT3x2; + case GL_FLOAT_MAT3x2: + return GL_FLOAT_MAT2x3; + case GL_FLOAT_MAT2x4: + return GL_FLOAT_MAT4x2; + case GL_FLOAT_MAT4x2: + return GL_FLOAT_MAT2x4; + case GL_FLOAT_MAT3x4: + return GL_FLOAT_MAT4x3; + case GL_FLOAT_MAT4x3: + return GL_FLOAT_MAT3x4; + default: + UNREACHABLE(); + return GL_NONE; + } +} + +int MatrixRegisterCount(GLenum type, bool isRowMajorMatrix) +{ + ASSERT(IsMatrixType(type)); + return isRowMajorMatrix ? VariableRowCount(type) : VariableColumnCount(type); +} + +int MatrixComponentCount(GLenum type, bool isRowMajorMatrix) +{ + ASSERT(IsMatrixType(type)); + return isRowMajorMatrix ? VariableColumnCount(type) : VariableRowCount(type); +} + +int VariableRegisterCount(GLenum type) +{ + return IsMatrixType(type) ? 
VariableColumnCount(type) : 1; +} + +int AllocateFirstFreeBits(unsigned int *bits, unsigned int allocationSize, unsigned int bitsSize) +{ + ASSERT(allocationSize <= bitsSize); + + unsigned int mask = std::numeric_limits<unsigned int>::max() >> + (std::numeric_limits<unsigned int>::digits - allocationSize); + + for (unsigned int i = 0; i < bitsSize - allocationSize + 1; i++) + { + if ((*bits & mask) == 0) + { + *bits |= mask; + return i; + } + + mask <<= 1; + } + + return -1; +} + +IndexRange ComputeIndexRange(DrawElementsType indexType, + const GLvoid *indices, + size_t count, + bool primitiveRestartEnabled) +{ + switch (indexType) + { + case DrawElementsType::UnsignedByte: + return ComputeTypedIndexRange(static_cast<const GLubyte *>(indices), count, + primitiveRestartEnabled, + GetPrimitiveRestartIndex(indexType)); + case DrawElementsType::UnsignedShort: + return ComputeTypedIndexRange(static_cast<const GLushort *>(indices), count, + primitiveRestartEnabled, + GetPrimitiveRestartIndex(indexType)); + case DrawElementsType::UnsignedInt: + return ComputeTypedIndexRange(static_cast<const GLuint *>(indices), count, + primitiveRestartEnabled, + GetPrimitiveRestartIndex(indexType)); + default: + UNREACHABLE(); + return IndexRange(); + } +} + +GLuint GetPrimitiveRestartIndex(DrawElementsType indexType) +{ + switch (indexType) + { + case DrawElementsType::UnsignedByte: + return 0xFF; + case DrawElementsType::UnsignedShort: + return 0xFFFF; + case DrawElementsType::UnsignedInt: + return 0xFFFFFFFF; + default: + UNREACHABLE(); + return 0; + } +} + +bool IsTriangleMode(PrimitiveMode drawMode) +{ + switch (drawMode) + { + case PrimitiveMode::Triangles: + case PrimitiveMode::TriangleFan: + case PrimitiveMode::TriangleStrip: + return true; + case PrimitiveMode::Points: + case PrimitiveMode::Lines: + case PrimitiveMode::LineLoop: + case PrimitiveMode::LineStrip: + return false; + default: + UNREACHABLE(); + } + + return false; +} + +namespace priv +{ +const angle::PackedEnumMap<PrimitiveMode, bool> gLineModes = { + {{PrimitiveMode::LineLoop, true}, + {PrimitiveMode::LineStrip, true}, + {PrimitiveMode::LineStripAdjacency, true}, + {PrimitiveMode::Lines, true}}}; +} // namespace priv + +bool IsIntegerFormat(GLenum unsizedFormat) +{ + switch (unsizedFormat) + { + case GL_RGBA_INTEGER: + case GL_RGB_INTEGER: + case GL_RG_INTEGER: + case GL_RED_INTEGER: + return true; + + default: + return false; + } +} + +// [OpenGL ES SL 3.00.4] Section 11 p. 120 +// Vertex Outs/Fragment Ins packing priorities +int VariableSortOrder(GLenum type) +{ + switch (type) + { + // 1. Arrays of mat4 and mat4 + // Non-square matrices of type matCxR consume the same space as a square + // matrix of type matN where N is the greater of C and R + case GL_FLOAT_MAT4: + case GL_FLOAT_MAT2x4: + case GL_FLOAT_MAT3x4: + case GL_FLOAT_MAT4x2: + case GL_FLOAT_MAT4x3: + return 0; + + // 2. Arrays of mat2 and mat2 (since they occupy full rows) + case GL_FLOAT_MAT2: + return 1; + + // 3. Arrays of vec4 and vec4 + case GL_FLOAT_VEC4: + case GL_INT_VEC4: + case GL_BOOL_VEC4: + case GL_UNSIGNED_INT_VEC4: + return 2; + + // 4. Arrays of mat3 and mat3 + case GL_FLOAT_MAT3: + case GL_FLOAT_MAT2x3: + case GL_FLOAT_MAT3x2: + return 3; + + // 5. Arrays of vec3 and vec3 + case GL_FLOAT_VEC3: + case GL_INT_VEC3: + case GL_BOOL_VEC3: + case GL_UNSIGNED_INT_VEC3: + return 4; + + // 6. Arrays of vec2 and vec2 + case GL_FLOAT_VEC2: + case GL_INT_VEC2: + case GL_BOOL_VEC2: + case GL_UNSIGNED_INT_VEC2: + return 5; + + // 7. 
Single component types + case GL_FLOAT: + case GL_INT: + case GL_BOOL: + case GL_UNSIGNED_INT: + case GL_SAMPLER_2D: + case GL_SAMPLER_CUBE: + case GL_SAMPLER_EXTERNAL_OES: + case GL_SAMPLER_2D_RECT_ANGLE: + case GL_SAMPLER_2D_ARRAY: + case GL_SAMPLER_2D_MULTISAMPLE: + case GL_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_SAMPLER_3D: + case GL_INT_SAMPLER_2D: + case GL_INT_SAMPLER_3D: + case GL_INT_SAMPLER_CUBE: + case GL_INT_SAMPLER_2D_ARRAY: + case GL_INT_SAMPLER_2D_MULTISAMPLE: + case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_UNSIGNED_INT_SAMPLER_2D: + case GL_UNSIGNED_INT_SAMPLER_3D: + case GL_UNSIGNED_INT_SAMPLER_CUBE: + case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE: + case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY: + case GL_SAMPLER_2D_SHADOW: + case GL_SAMPLER_2D_ARRAY_SHADOW: + case GL_SAMPLER_CUBE_SHADOW: + case GL_IMAGE_2D: + case GL_INT_IMAGE_2D: + case GL_UNSIGNED_INT_IMAGE_2D: + case GL_IMAGE_3D: + case GL_INT_IMAGE_3D: + case GL_UNSIGNED_INT_IMAGE_3D: + case GL_IMAGE_2D_ARRAY: + case GL_INT_IMAGE_2D_ARRAY: + case GL_UNSIGNED_INT_IMAGE_2D_ARRAY: + case GL_IMAGE_CUBE: + case GL_INT_IMAGE_CUBE: + case GL_UNSIGNED_INT_IMAGE_CUBE: + case GL_UNSIGNED_INT_ATOMIC_COUNTER: + return 6; + + default: + UNREACHABLE(); + return 0; + } +} + +std::string ParseResourceName(const std::string &name, std::vector<unsigned int> *outSubscripts) +{ + if (outSubscripts) + { + outSubscripts->clear(); + } + // Strip any trailing array indexing operators and retrieve the subscripts. + size_t baseNameLength = name.length(); + bool hasIndex = true; + while (hasIndex) + { + size_t open = name.find_last_of('[', baseNameLength - 1); + size_t close = name.find_last_of(']', baseNameLength - 1); + hasIndex = (open != std::string::npos) && (close == baseNameLength - 1); + if (hasIndex) + { + baseNameLength = open; + if (outSubscripts) + { + int index = atoi(name.substr(open + 1).c_str()); + if (index >= 0) + { + outSubscripts->push_back(index); + } + else + { + outSubscripts->push_back(GL_INVALID_INDEX); + } + } + } + } + + return name.substr(0, baseNameLength); +} + +const sh::ShaderVariable *FindShaderVarField(const sh::ShaderVariable &var, + const std::string &fullName, + GLuint *fieldIndexOut) +{ + if (var.fields.empty()) + { + return nullptr; + } + size_t pos = fullName.find_first_of("."); + if (pos == std::string::npos) + { + return nullptr; + } + std::string topName = fullName.substr(0, pos); + if (topName != var.name) + { + return nullptr; + } + std::string fieldName = fullName.substr(pos + 1); + if (fieldName.empty()) + { + return nullptr; + } + for (size_t field = 0; field < var.fields.size(); ++field) + { + if (var.fields[field].name == fieldName) + { + *fieldIndexOut = static_cast<GLuint>(field); + return &var.fields[field]; + } + } + return nullptr; +} + +unsigned int ArraySizeProduct(const std::vector<unsigned int> &arraySizes) +{ + unsigned int arraySizeProduct = 1u; + for (unsigned int arraySize : arraySizes) + { + arraySizeProduct *= arraySize; + } + return arraySizeProduct; +} + +unsigned int ParseArrayIndex(const std::string &name, size_t *nameLengthWithoutArrayIndexOut) +{ + ASSERT(nameLengthWithoutArrayIndexOut != nullptr); + + // Strip any trailing array operator and retrieve the subscript + size_t open = name.find_last_of('['); + if (open != std::string::npos && name.back() == ']') + { + bool indexIsValidDecimalNumber = true; + for (size_t i = open + 1; i < name.length() - 1u; ++i) + { + if (!isdigit(name[i])) + { + indexIsValidDecimalNumber = 
false; + break; + } + } + if (indexIsValidDecimalNumber) + { + errno = 0; // reset global error flag. + unsigned long subscript = + strtoul(name.c_str() + open + 1, /*endptr*/ nullptr, /*radix*/ 10); + + // Check if resulting integer is out-of-range or conversion error. + if ((subscript <= static_cast<unsigned long>(UINT_MAX)) && + !(subscript == ULONG_MAX && errno == ERANGE) && !(errno != 0 && subscript == 0)) + { + *nameLengthWithoutArrayIndexOut = open; + return static_cast<unsigned int>(subscript); + } + } + } + + *nameLengthWithoutArrayIndexOut = name.length(); + return GL_INVALID_INDEX; +} + +const char *GetGenericErrorMessage(GLenum error) +{ + switch (error) + { + case GL_NO_ERROR: + return ""; + case GL_INVALID_ENUM: + return "Invalid enum."; + case GL_INVALID_VALUE: + return "Invalid value."; + case GL_INVALID_OPERATION: + return "Invalid operation."; + case GL_STACK_OVERFLOW: + return "Stack overflow."; + case GL_STACK_UNDERFLOW: + return "Stack underflow."; + case GL_OUT_OF_MEMORY: + return "Out of memory."; + case GL_INVALID_FRAMEBUFFER_OPERATION: + return "Invalid framebuffer operation."; + default: + UNREACHABLE(); + return "Unknown error."; + } +} + +unsigned int ElementTypeSize(GLenum elementType) +{ + switch (elementType) + { + case GL_UNSIGNED_BYTE: + return sizeof(GLubyte); + case GL_UNSIGNED_SHORT: + return sizeof(GLushort); + case GL_UNSIGNED_INT: + return sizeof(GLuint); + default: + UNREACHABLE(); + return 0; + } +} + +PipelineType GetPipelineType(ShaderType type) +{ + switch (type) + { + case ShaderType::Vertex: + case ShaderType::Fragment: + case ShaderType::Geometry: + return PipelineType::GraphicsPipeline; + case ShaderType::Compute: + return PipelineType::ComputePipeline; + default: + UNREACHABLE(); + return PipelineType::GraphicsPipeline; + } +} + +} // namespace gl + +namespace egl +{ +static_assert(EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X_KHR - EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR == 1, + "Unexpected EGL cube map enum value."); +static_assert(EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y_KHR - EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR == 2, + "Unexpected EGL cube map enum value."); +static_assert(EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_KHR - EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR == 3, + "Unexpected EGL cube map enum value."); +static_assert(EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z_KHR - EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR == 4, + "Unexpected EGL cube map enum value."); +static_assert(EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_KHR - EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR == 5, + "Unexpected EGL cube map enum value."); + +bool IsCubeMapTextureTarget(EGLenum target) +{ + return (target >= FirstCubeMapTextureTarget && target <= LastCubeMapTextureTarget); +} + +size_t CubeMapTextureTargetToLayerIndex(EGLenum target) +{ + ASSERT(IsCubeMapTextureTarget(target)); + return target - static_cast<size_t>(FirstCubeMapTextureTarget); +} + +EGLenum LayerIndexToCubeMapTextureTarget(size_t index) +{ + ASSERT(index <= (LastCubeMapTextureTarget - FirstCubeMapTextureTarget)); + return FirstCubeMapTextureTarget + static_cast<GLenum>(index); +} + +bool IsTextureTarget(EGLenum target) +{ + switch (target) + { + case EGL_GL_TEXTURE_2D_KHR: + case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR: + case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X_KHR: + case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y_KHR: + case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_KHR: + case EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z_KHR: + case EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_KHR: + case EGL_GL_TEXTURE_3D_KHR: + return true; + + default: + return false; + } +} + +bool 
IsRenderbufferTarget(EGLenum target) +{ + return target == EGL_GL_RENDERBUFFER_KHR; +} + +bool IsExternalImageTarget(EGLenum target) +{ + switch (target) + { + case EGL_NATIVE_BUFFER_ANDROID: + case EGL_D3D11_TEXTURE_ANGLE: + return true; + + default: + return false; + } +} + +const char *GetGenericErrorMessage(EGLint error) +{ + switch (error) + { + case EGL_SUCCESS: + return ""; + case EGL_NOT_INITIALIZED: + return "Not initialized."; + case EGL_BAD_ACCESS: + return "Bad access."; + case EGL_BAD_ALLOC: + return "Bad allocation."; + case EGL_BAD_ATTRIBUTE: + return "Bad attribute."; + case EGL_BAD_CONFIG: + return "Bad config."; + case EGL_BAD_CONTEXT: + return "Bad context."; + case EGL_BAD_CURRENT_SURFACE: + return "Bad current surface."; + case EGL_BAD_DISPLAY: + return "Bad display."; + case EGL_BAD_MATCH: + return "Bad match."; + case EGL_BAD_NATIVE_WINDOW: + return "Bad native window."; + case EGL_BAD_PARAMETER: + return "Bad parameter."; + case EGL_BAD_SURFACE: + return "Bad surface."; + case EGL_CONTEXT_LOST: + return "Context lost."; + case EGL_BAD_STREAM_KHR: + return "Bad stream."; + case EGL_BAD_STATE_KHR: + return "Bad state."; + case EGL_BAD_DEVICE_EXT: + return "Bad device."; + default: + UNREACHABLE(); + return "Unknown error."; + } +} + +} // namespace egl + +namespace egl_gl +{ +GLuint EGLClientBufferToGLObjectHandle(EGLClientBuffer buffer) +{ + return static_cast<GLuint>(reinterpret_cast<uintptr_t>(buffer)); +} +} // namespace egl_gl + +namespace gl_egl +{ +EGLenum GLComponentTypeToEGLColorComponentType(GLenum glComponentType) +{ + switch (glComponentType) + { + case GL_FLOAT: + return EGL_COLOR_COMPONENT_TYPE_FLOAT_EXT; + + case GL_UNSIGNED_NORMALIZED: + return EGL_COLOR_COMPONENT_TYPE_FIXED_EXT; + + default: + UNREACHABLE(); + return EGL_NONE; + } +} + +EGLClientBuffer GLObjectHandleToEGLClientBuffer(GLuint handle) +{ + return reinterpret_cast<EGLClientBuffer>(static_cast<uintptr_t>(handle)); +} + +} // namespace gl_egl + +#if !defined(ANGLE_ENABLE_WINDOWS_STORE) +std::string getTempPath() +{ +# ifdef ANGLE_PLATFORM_WINDOWS + char path[MAX_PATH]; + DWORD pathLen = GetTempPathA(sizeof(path) / sizeof(path[0]), path); + if (pathLen == 0) + { + UNREACHABLE(); + return std::string(); + } + + UINT unique = GetTempFileNameA(path, "sh", 0, path); + if (unique == 0) + { + UNREACHABLE(); + return std::string(); + } + + return path; +# else + UNIMPLEMENTED(); + return ""; +# endif +} + +void writeFile(const char *path, const void *content, size_t size) +{ + FILE *file = fopen(path, "w"); + if (!file) + { + UNREACHABLE(); + return; + } + + fwrite(content, sizeof(char), size, file); + fclose(file); +} +#endif // !ANGLE_ENABLE_WINDOWS_STORE + +#if defined(ANGLE_PLATFORM_WINDOWS) + +// Causes the thread to relinquish the remainder of its time slice to any +// other thread that is ready to run.If there are no other threads ready +// to run, the function returns immediately, and the thread continues execution. +void ScheduleYield() +{ + Sleep(0); +} + +#endif diff --git a/gfx/angle/checkout/src/common/utilities.h b/gfx/angle/checkout/src/common/utilities.h new file mode 100644 index 0000000000..9cacac6286 --- /dev/null +++ b/gfx/angle/checkout/src/common/utilities.h @@ -0,0 +1,253 @@ +// +// Copyright (c) 2002-2013 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +// utilities.h: Conversion functions and other utility routines. 
+ +#ifndef COMMON_UTILITIES_H_ +#define COMMON_UTILITIES_H_ + +#include <EGL/egl.h> +#include <EGL/eglext.h> + +#include <math.h> +#include <string> +#include <vector> + +#include "angle_gl.h" + +#include "common/PackedEnums.h" +#include "common/mathutil.h" +#include "common/platform.h" + +namespace sh +{ +struct ShaderVariable; +} + +namespace gl +{ + +int VariableComponentCount(GLenum type); +GLenum VariableComponentType(GLenum type); +size_t VariableComponentSize(GLenum type); +size_t VariableInternalSize(GLenum type); +size_t VariableExternalSize(GLenum type); +int VariableRowCount(GLenum type); +int VariableColumnCount(GLenum type); +bool IsSamplerType(GLenum type); +bool IsImageType(GLenum type); +bool IsImage2DType(GLenum type); +bool IsAtomicCounterType(GLenum type); +bool IsOpaqueType(GLenum type); +bool IsMatrixType(GLenum type); +GLenum TransposeMatrixType(GLenum type); +int VariableRegisterCount(GLenum type); +int MatrixRegisterCount(GLenum type, bool isRowMajorMatrix); +int MatrixComponentCount(GLenum type, bool isRowMajorMatrix); +int VariableSortOrder(GLenum type); +GLenum VariableBoolVectorType(GLenum type); + +int AllocateFirstFreeBits(unsigned int *bits, unsigned int allocationSize, unsigned int bitsSize); + +// Parse the base resource name and array indices. Returns the base name of the resource. +// If the provided name doesn't index an array, the outSubscripts vector will be empty. +// If the provided name indexes an array, the outSubscripts vector will contain indices with +// outermost array indices in the back. If an array index is invalid, GL_INVALID_INDEX is added to +// outSubscripts. +std::string ParseResourceName(const std::string &name, std::vector<unsigned int> *outSubscripts); + +// Find the child field which matches 'fullName' == var.name + "." + field.name. +// Return nullptr if not found. +const sh::ShaderVariable *FindShaderVarField(const sh::ShaderVariable &var, + const std::string &fullName, + GLuint *fieldIndexOut); + +// Find the range of index values in the provided indices pointer. Primitive restart indices are +// only counted in the range if primitive restart is disabled. +IndexRange ComputeIndexRange(DrawElementsType indexType, + const GLvoid *indices, + size_t count, + bool primitiveRestartEnabled); + +// Get the primitive restart index value for the given index type. +GLuint GetPrimitiveRestartIndex(DrawElementsType indexType); + +// Get the primitive restart index value with the given C++ type. +template <typename T> +constexpr T GetPrimitiveRestartIndexFromType() +{ + return std::numeric_limits<T>::max(); +} + +static_assert(GetPrimitiveRestartIndexFromType<uint8_t>() == 0xFF, + "verify restart index for uint8_t values"); +static_assert(GetPrimitiveRestartIndexFromType<uint16_t>() == 0xFFFF, + "verify restart index for uint8_t values"); +static_assert(GetPrimitiveRestartIndexFromType<uint32_t>() == 0xFFFFFFFF, + "verify restart index for uint8_t values"); + +bool IsTriangleMode(PrimitiveMode drawMode); + +namespace priv +{ +extern const angle::PackedEnumMap<PrimitiveMode, bool> gLineModes; +} // namespace priv + +ANGLE_INLINE bool IsLineMode(PrimitiveMode primitiveMode) +{ + return priv::gLineModes[primitiveMode]; +} + +bool IsIntegerFormat(GLenum unsizedFormat); + +// Returns the product of the sizes in the vector, or 1 if the vector is empty. Doesn't currently +// perform overflow checks. 
+unsigned int ArraySizeProduct(const std::vector<unsigned int> &arraySizes); + +// Return the array index at the end of name, and write the length of name before the final array +// index into nameLengthWithoutArrayIndexOut. In case name doesn't include an array index, return +// GL_INVALID_INDEX and write the length of the original string. +unsigned int ParseArrayIndex(const std::string &name, size_t *nameLengthWithoutArrayIndexOut); + +enum class SamplerFormat : uint8_t +{ + Float = 0, + Unsigned = 1, + Signed = 2, + Shadow = 3, + + InvalidEnum = 4, + EnumCount = 4, +}; + +struct UniformTypeInfo final : angle::NonCopyable +{ + inline constexpr UniformTypeInfo(GLenum type, + GLenum componentType, + GLenum textureType, + GLenum transposedMatrixType, + GLenum boolVectorType, + SamplerFormat samplerFormat, + int rowCount, + int columnCount, + int componentCount, + size_t componentSize, + size_t internalSize, + size_t externalSize, + bool isSampler, + bool isMatrixType, + bool isImageType, + const char *glslAsFloat); + + GLenum type; + GLenum componentType; + GLenum textureType; + GLenum transposedMatrixType; + GLenum boolVectorType; + SamplerFormat samplerFormat; + int rowCount; + int columnCount; + int componentCount; + size_t componentSize; + size_t internalSize; + size_t externalSize; + bool isSampler; + bool isMatrixType; + bool isImageType; + const char *glslAsFloat; +}; + +inline constexpr UniformTypeInfo::UniformTypeInfo(GLenum type, + GLenum componentType, + GLenum textureType, + GLenum transposedMatrixType, + GLenum boolVectorType, + SamplerFormat samplerFormat, + int rowCount, + int columnCount, + int componentCount, + size_t componentSize, + size_t internalSize, + size_t externalSize, + bool isSampler, + bool isMatrixType, + bool isImageType, + const char *glslAsFloat) + : type(type), + componentType(componentType), + textureType(textureType), + transposedMatrixType(transposedMatrixType), + boolVectorType(boolVectorType), + samplerFormat(samplerFormat), + rowCount(rowCount), + columnCount(columnCount), + componentCount(componentCount), + componentSize(componentSize), + internalSize(internalSize), + externalSize(externalSize), + isSampler(isSampler), + isMatrixType(isMatrixType), + isImageType(isImageType), + glslAsFloat(glslAsFloat) +{} + +const UniformTypeInfo &GetUniformTypeInfo(GLenum uniformType); + +const char *GetGenericErrorMessage(GLenum error); + +unsigned int ElementTypeSize(GLenum elementType); + +template <typename T> +T GetClampedVertexCount(size_t vertexCount) +{ + static constexpr size_t kMax = static_cast<size_t>(std::numeric_limits<T>::max()); + return static_cast<T>(vertexCount > kMax ? 
kMax : vertexCount); +} + +enum class PipelineType +{ + GraphicsPipeline = 0, + ComputePipeline = 1, +}; + +PipelineType GetPipelineType(ShaderType shaderType); +} // namespace gl + +namespace egl +{ +static const EGLenum FirstCubeMapTextureTarget = EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR; +static const EGLenum LastCubeMapTextureTarget = EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_KHR; +bool IsCubeMapTextureTarget(EGLenum target); +size_t CubeMapTextureTargetToLayerIndex(EGLenum target); +EGLenum LayerIndexToCubeMapTextureTarget(size_t index); +bool IsTextureTarget(EGLenum target); +bool IsRenderbufferTarget(EGLenum target); +bool IsExternalImageTarget(EGLenum target); + +const char *GetGenericErrorMessage(EGLint error); +} // namespace egl + +namespace egl_gl +{ +GLuint EGLClientBufferToGLObjectHandle(EGLClientBuffer buffer); +} + +namespace gl_egl +{ +EGLenum GLComponentTypeToEGLColorComponentType(GLenum glComponentType); +EGLClientBuffer GLObjectHandleToEGLClientBuffer(GLuint handle); +} // namespace gl_egl + +#if !defined(ANGLE_ENABLE_WINDOWS_STORE) +std::string getTempPath(); +void writeFile(const char *path, const void *data, size_t size); +#endif + +#if defined(ANGLE_PLATFORM_WINDOWS) +void ScheduleYield(); +#endif + +#endif // COMMON_UTILITIES_H_ diff --git a/gfx/angle/checkout/src/common/vector_utils.h b/gfx/angle/checkout/src/common/vector_utils.h new file mode 100644 index 0000000000..d23a836288 --- /dev/null +++ b/gfx/angle/checkout/src/common/vector_utils.h @@ -0,0 +1,523 @@ +// +// Copyright 2016 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// vector_utils.h: Utility classes implementing various vector operations + +#ifndef COMMON_VECTOR_UTILS_H_ +#define COMMON_VECTOR_UTILS_H_ + +#include <cmath> +#include <cstddef> +#include <ostream> +#include <type_traits> + +namespace angle +{ + +template <size_t Dimension, typename Type> +class Vector; + +using Vector2 = Vector<2, float>; +using Vector3 = Vector<3, float>; +using Vector4 = Vector<4, float>; + +using Vector2I = Vector<2, int>; +using Vector3I = Vector<3, int>; +using Vector4I = Vector<4, int>; + +using Vector2U = Vector<2, unsigned int>; +using Vector3U = Vector<3, unsigned int>; +using Vector4U = Vector<4, unsigned int>; + +template <size_t Dimension, typename Type> +class VectorBase +{ + public: + using VectorN = Vector<Dimension, Type>; + + // Constructors + VectorBase() = default; + explicit VectorBase(Type element); + + template <typename Type2> + VectorBase(const VectorBase<Dimension, Type2> &other); + + template <typename Arg1, typename Arg2, typename... Args> + VectorBase(const Arg1 &arg1, const Arg2 &arg2, const Args &... 
args); + + // Access the vector backing storage directly + const Type *data() const { return mData; } + Type *data() { return mData; } + constexpr size_t size() const { return Dimension; } + + // Load or store the pointer from / to raw data + static VectorN Load(const Type *source); + static void Store(const VectorN &source, Type *destination); + + // Index the vector + Type &operator[](size_t i) { return mData[i]; } + const Type &operator[](size_t i) const { return mData[i]; } + + // Basic arithmetic operations + VectorN operator+() const; + VectorN operator-() const; + VectorN operator+(const VectorN &other) const; + VectorN operator-(const VectorN &other) const; + VectorN operator*(const VectorN &other) const; + VectorN operator/(const VectorN &other) const; + VectorN operator*(Type other) const; + VectorN operator/(Type other) const; + friend VectorN operator*(Type a, const VectorN &b) { return b * a; } + + // Compound arithmetic operations + VectorN &operator+=(const VectorN &other); + VectorN &operator-=(const VectorN &other); + VectorN &operator*=(const VectorN &other); + VectorN &operator/=(const VectorN &other); + VectorN &operator*=(Type other); + VectorN &operator/=(Type other); + + // Comparison operators + bool operator==(const VectorN &other) const; + bool operator!=(const VectorN &other) const; + + // Other arithmetic operations + Type length() const; + Type lengthSquared() const; + Type dot(const VectorBase<Dimension, Type> &other) const; + VectorN normalized() const; + + protected: + template <size_t CurrentIndex, size_t OtherDimension, typename OtherType, typename... Args> + void initWithList(const Vector<OtherDimension, OtherType> &arg1, const Args &... args); + + // Some old compilers consider this function an alternative for initWithList(Vector) + // when the variant above is more precise. Use SFINAE on the return value to hide + // this variant for non-arithmetic types. The return value is still void. + template <size_t CurrentIndex, typename OtherType, typename... Args> + typename std::enable_if<std::is_arithmetic<OtherType>::value>::type initWithList( + OtherType arg1, + const Args &... 
args); + + template <size_t CurrentIndex> + void initWithList() const; + + template <size_t Dimension2, typename Type2> + friend class VectorBase; + + Type mData[Dimension]; +}; + +template <size_t Dimension, typename Type> +std::ostream &operator<<(std::ostream &ostream, const VectorBase<Dimension, Type> &vector); + +template <typename Type> +class Vector<2, Type> : public VectorBase<2, Type> +{ + public: + // Import the constructors defined in VectorBase + using VectorBase<2, Type>::VectorBase; + + // Element shorthands + Type &x() { return this->mData[0]; } + Type &y() { return this->mData[1]; } + + const Type &x() const { return this->mData[0]; } + const Type &y() const { return this->mData[1]; } +}; + +template <typename Type> +std::ostream &operator<<(std::ostream &ostream, const Vector<2, Type> &vector); + +template <typename Type> +class Vector<3, Type> : public VectorBase<3, Type> +{ + public: + // Import the constructors defined in VectorBase + using VectorBase<3, Type>::VectorBase; + + // Additional operations + Vector<3, Type> cross(const Vector<3, Type> &other) const; + + // Element shorthands + Type &x() { return this->mData[0]; } + Type &y() { return this->mData[1]; } + Type &z() { return this->mData[2]; } + + const Type &x() const { return this->mData[0]; } + const Type &y() const { return this->mData[1]; } + const Type &z() const { return this->mData[2]; } +}; + +template <typename Type> +std::ostream &operator<<(std::ostream &ostream, const Vector<3, Type> &vector); + +template <typename Type> +class Vector<4, Type> : public VectorBase<4, Type> +{ + public: + // Import the constructors defined in VectorBase + using VectorBase<4, Type>::VectorBase; + + // Element shorthands + Type &x() { return this->mData[0]; } + Type &y() { return this->mData[1]; } + Type &z() { return this->mData[2]; } + Type &w() { return this->mData[3]; } + + const Type &x() const { return this->mData[0]; } + const Type &y() const { return this->mData[1]; } + const Type &z() const { return this->mData[2]; } + const Type &w() const { return this->mData[3]; } +}; + +template <typename Type> +std::ostream &operator<<(std::ostream &ostream, const Vector<4, Type> &vector); + +// Implementation of constructors and misc operations + +template <size_t Dimension, typename Type> +VectorBase<Dimension, Type>::VectorBase(Type element) +{ + for (size_t i = 0; i < Dimension; ++i) + { + mData[i] = element; + } +} + +template <size_t Dimension, typename Type> +template <typename Type2> +VectorBase<Dimension, Type>::VectorBase(const VectorBase<Dimension, Type2> &other) +{ + for (size_t i = 0; i < Dimension; ++i) + { + mData[i] = static_cast<Type>(other.mData[i]); + } +} + +// Ideally we would like to have only two constructors: +// - a scalar constructor that takes Type as a parameter +// - a compound constructor +// However if we define the compound constructor for when it has a single arguments, then calling +// Vector2(0.0) will be ambiguous. To solve this we take advantage of there being a single compound +// constructor with a single argument, which is the copy constructor. We end up with three +// constructors: +// - the scalar constructor +// - the copy constructor +// - the compound constructor for two or more arguments, hence the arg1, and arg2 here. +template <size_t Dimension, typename Type> +template <typename Arg1, typename Arg2, typename... Args> +VectorBase<Dimension, Type>::VectorBase(const Arg1 &arg1, const Arg2 &arg2, const Args &... 
args) +{ + initWithList<0>(arg1, arg2, args...); +} + +template <size_t Dimension, typename Type> +template <size_t CurrentIndex, size_t OtherDimension, typename OtherType, typename... Args> +void VectorBase<Dimension, Type>::initWithList(const Vector<OtherDimension, OtherType> &arg1, + const Args &... args) +{ + static_assert(CurrentIndex + OtherDimension <= Dimension, + "Too much data in the vector constructor."); + for (size_t i = 0; i < OtherDimension; ++i) + { + mData[CurrentIndex + i] = static_cast<Type>(arg1.mData[i]); + } + initWithList<CurrentIndex + OtherDimension>(args...); +} + +template <size_t Dimension, typename Type> +template <size_t CurrentIndex, typename OtherType, typename... Args> +typename std::enable_if<std::is_arithmetic<OtherType>::value>::type +VectorBase<Dimension, Type>::initWithList(OtherType arg1, const Args &... args) +{ + static_assert(CurrentIndex + 1 <= Dimension, "Too much data in the vector constructor."); + mData[CurrentIndex] = static_cast<Type>(arg1); + initWithList<CurrentIndex + 1>(args...); +} + +template <size_t Dimension, typename Type> +template <size_t CurrentIndex> +void VectorBase<Dimension, Type>::initWithList() const +{ + static_assert(CurrentIndex == Dimension, "Not enough data in the vector constructor."); +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> VectorBase<Dimension, Type>::Load(const Type *source) +{ + Vector<Dimension, Type> result; + for (size_t i = 0; i < Dimension; ++i) + { + result.mData[i] = source[i]; + } + return result; +} + +template <size_t Dimension, typename Type> +void VectorBase<Dimension, Type>::Store(const Vector<Dimension, Type> &source, Type *destination) +{ + for (size_t i = 0; i < Dimension; ++i) + { + destination[i] = source.mData[i]; + } +} + +// Implementation of basic arithmetic operations +template <size_t Dimension, typename Type> +Vector<Dimension, Type> VectorBase<Dimension, Type>::operator+() const +{ + Vector<Dimension, Type> result; + for (size_t i = 0; i < Dimension; ++i) + { + result.mData[i] = +mData[i]; + } + return result; +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> VectorBase<Dimension, Type>::operator-() const +{ + Vector<Dimension, Type> result; + for (size_t i = 0; i < Dimension; ++i) + { + result.mData[i] = -mData[i]; + } + return result; +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> VectorBase<Dimension, Type>::operator+( + const Vector<Dimension, Type> &other) const +{ + Vector<Dimension, Type> result; + for (size_t i = 0; i < Dimension; ++i) + { + result.mData[i] = mData[i] + other.mData[i]; + } + return result; +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> VectorBase<Dimension, Type>::operator-( + const Vector<Dimension, Type> &other) const +{ + Vector<Dimension, Type> result; + for (size_t i = 0; i < Dimension; ++i) + { + result.mData[i] = mData[i] - other.mData[i]; + } + return result; +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> VectorBase<Dimension, Type>::operator*( + const Vector<Dimension, Type> &other) const +{ + Vector<Dimension, Type> result; + for (size_t i = 0; i < Dimension; ++i) + { + result.mData[i] = mData[i] * other.mData[i]; + } + return result; +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> VectorBase<Dimension, Type>::operator/( + const Vector<Dimension, Type> &other) const +{ + Vector<Dimension, Type> result; + for (size_t i = 0; i < Dimension; ++i) + { + result.mData[i] = mData[i] / other.mData[i]; 
+ } + return result; +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> VectorBase<Dimension, Type>::operator*(Type other) const +{ + Vector<Dimension, Type> result; + for (size_t i = 0; i < Dimension; ++i) + { + result.mData[i] = mData[i] * other; + } + return result; +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> VectorBase<Dimension, Type>::operator/(Type other) const +{ + Vector<Dimension, Type> result; + for (size_t i = 0; i < Dimension; ++i) + { + result.mData[i] = mData[i] / other; + } + return result; +} + +// Implementation of compound arithmetic operations +template <size_t Dimension, typename Type> +Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator+=( + const Vector<Dimension, Type> &other) +{ + for (size_t i = 0; i < Dimension; ++i) + { + mData[i] += other.mData[i]; + } + return *static_cast<Vector<Dimension, Type> *>(this); +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator-=( + const Vector<Dimension, Type> &other) +{ + for (size_t i = 0; i < Dimension; ++i) + { + mData[i] -= other.mData[i]; + } + return *static_cast<Vector<Dimension, Type> *>(this); +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator*=( + const Vector<Dimension, Type> &other) +{ + for (size_t i = 0; i < Dimension; ++i) + { + mData[i] *= other.mData[i]; + } + return *static_cast<Vector<Dimension, Type> *>(this); +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator/=( + const Vector<Dimension, Type> &other) +{ + for (size_t i = 0; i < Dimension; ++i) + { + mData[i] /= other.mData[i]; + } + return *static_cast<Vector<Dimension, Type> *>(this); +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator*=(Type other) +{ + for (size_t i = 0; i < Dimension; ++i) + { + mData[i] *= other; + } + return *static_cast<Vector<Dimension, Type> *>(this); +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> &VectorBase<Dimension, Type>::operator/=(Type other) +{ + for (size_t i = 0; i < Dimension; ++i) + { + mData[i] /= other; + } + return *static_cast<Vector<Dimension, Type> *>(this); +} + +// Implementation of comparison operators +template <size_t Dimension, typename Type> +bool VectorBase<Dimension, Type>::operator==(const Vector<Dimension, Type> &other) const +{ + for (size_t i = 0; i < Dimension; ++i) + { + if (mData[i] != other.mData[i]) + { + return false; + } + } + return true; +} + +template <size_t Dimension, typename Type> +bool VectorBase<Dimension, Type>::operator!=(const Vector<Dimension, Type> &other) const +{ + return !(*this == other); +} + +// Implementation of other arithmetic operations +template <size_t Dimension, typename Type> +Type VectorBase<Dimension, Type>::length() const +{ + static_assert(std::is_floating_point<Type>::value, + "VectorN::length is only defined for floating point vectors"); + return std::sqrt(lengthSquared()); +} + +template <size_t Dimension, typename Type> +Type VectorBase<Dimension, Type>::lengthSquared() const +{ + return dot(*this); +} + +template <size_t Dimension, typename Type> +Type VectorBase<Dimension, Type>::dot(const VectorBase<Dimension, Type> &other) const +{ + Type sum = Type(); + for (size_t i = 0; i < Dimension; ++i) + { + sum += mData[i] * other.mData[i]; + } + return sum; +} + +template <size_t Dimension, typename Type> +std::ostream 
&operator<<(std::ostream &ostream, const VectorBase<Dimension, Type> &vector) +{ + ostream << "[ "; + for (size_t elementIdx = 0; elementIdx < Dimension; elementIdx++) + { + if (elementIdx > 0) + { + ostream << ", "; + } + ostream << vector.data()[elementIdx]; + } + ostream << " ]"; + return ostream; +} + +template <size_t Dimension, typename Type> +Vector<Dimension, Type> VectorBase<Dimension, Type>::normalized() const +{ + static_assert(std::is_floating_point<Type>::value, + "VectorN::normalized is only defined for floating point vectors"); + return *this / length(); +} + +template <typename Type> +std::ostream &operator<<(std::ostream &ostream, const Vector<2, Type> &vector) +{ + return ostream << static_cast<const VectorBase<2, Type> &>(vector); +} + +template <typename Type> +Vector<3, Type> Vector<3, Type>::cross(const Vector<3, Type> &other) const +{ + return Vector<3, Type>(y() * other.z() - z() * other.y(), z() * other.x() - x() * other.z(), + x() * other.y() - y() * other.x()); +} + +template <typename Type> +std::ostream &operator<<(std::ostream &ostream, const Vector<3, Type> &vector) +{ + return ostream << static_cast<const VectorBase<3, Type> &>(vector); +} + +template <typename Type> +std::ostream &operator<<(std::ostream &ostream, const Vector<4, Type> &vector) +{ + return ostream << static_cast<const VectorBase<4, Type> &>(vector); +} + +} // namespace angle + +#endif // COMMON_VECTOR_UTILS_H_ diff --git a/gfx/angle/checkout/src/common/version.h b/gfx/angle/checkout/src/common/version.h new file mode 100644 index 0000000000..ff96e59919 --- /dev/null +++ b/gfx/angle/checkout/src/common/version.h @@ -0,0 +1,27 @@ +// +// Copyright (c) 2014 The ANGLE Project Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// + +#ifndef COMMON_VERSION_H_ +#define COMMON_VERSION_H_ + +#include "id/commit.h" + +#define ANGLE_MAJOR_VERSION 2 +#define ANGLE_MINOR_VERSION 1 + +#ifndef ANGLE_REVISION +# define ANGLE_REVISION 0 +#endif + +#define ANGLE_STRINGIFY(x) #x +#define ANGLE_MACRO_STRINGIFY(x) ANGLE_STRINGIFY(x) + +#define ANGLE_VERSION_STRING \ + ANGLE_MACRO_STRINGIFY(ANGLE_MAJOR_VERSION) \ + "." ANGLE_MACRO_STRINGIFY(ANGLE_MINOR_VERSION) "." ANGLE_MACRO_STRINGIFY( \ + ANGLE_REVISION) "." ANGLE_COMMIT_HASH + +#endif // COMMON_VERSION_H_ |
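A minimal, hypothetical usage sketch of the resource-name helpers declared in common/utilities.h above (ParseResourceName and ParseArrayIndex), assuming the vendored header is reachable as "common/utilities.h" on the include path; the snippet is illustrative only and not part of the upstream files:

    #include <cassert>
    #include <string>
    #include <vector>

    #include "common/utilities.h"

    void ParseNameExamples()
    {
        std::vector<unsigned int> subscripts;

        // "light[2][0]" -> base name "light"; indices are pushed innermost first,
        // so the outermost index (2) ends up at the back of the vector.
        std::string base = gl::ParseResourceName("light[2][0]", &subscripts);
        assert(base == "light");
        assert(subscripts.size() == 2 && subscripts.back() == 2);

        // ParseArrayIndex strips a single trailing subscript and reports the length
        // of the name without it; GL_INVALID_INDEX signals "no array index present".
        size_t baseLength = 0;
        unsigned int index = gl::ParseArrayIndex("color[3]", &baseLength);
        assert(index == 3 && baseLength == 5);  // "color" is 5 characters long

        unsigned int none = gl::ParseArrayIndex("color", &baseLength);
        assert(none == GL_INVALID_INDEX && baseLength == 5);
    }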
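Similarly, the Vector templates defined in common/vector_utils.h are header-only and can be exercised directly. A short, hypothetical sketch (again assuming the "common/" include prefix) showing the element-wise operators, cross(), and the Load()/normalized() helpers defined above:

    #include <cassert>
    #include <iostream>

    #include "common/vector_utils.h"

    int main()
    {
        using angle::Vector3;

        Vector3 a(1.0f, 0.0f, 0.0f);
        Vector3 b(0.0f, 1.0f, 0.0f);

        // cross() is only provided by the Vector<3, Type> specialization.
        assert(a.cross(b) == Vector3(0.0f, 0.0f, 1.0f));

        // Arithmetic is element-wise; the friend operator* allows scalar * vector.
        Vector3 scaled = 2.0f * (a + b);  // (2, 2, 0)

        // length()/normalized() are restricted to floating-point element types.
        std::cout << scaled.normalized() << " has length " << scaled.length() << "\n";

        // Load()/Store() convert between raw arrays and vectors.
        const float raw[3] = {3.0f, 4.0f, 0.0f};
        Vector3 v = Vector3::Load(raw);
        assert(v.lengthSquared() == 25.0f);
        return 0;
    }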