Diffstat (limited to 'gfx/skia/skia/include/private')
-rw-r--r--  gfx/skia/skia/include/private/SkBitmaskEnum.h | 59
-rw-r--r--  gfx/skia/skia/include/private/SkChecksum.h | 81
-rw-r--r--  gfx/skia/skia/include/private/SkColorData.h | 386
-rw-r--r--  gfx/skia/skia/include/private/SkEncodedInfo.h | 272
-rw-r--r--  gfx/skia/skia/include/private/SkGainmapInfo.h | 92
-rw-r--r--  gfx/skia/skia/include/private/SkGainmapShader.h | 53
-rw-r--r--  gfx/skia/skia/include/private/SkIDChangeListener.h | 76
-rw-r--r--  gfx/skia/skia/include/private/SkJpegGainmapEncoder.h | 71
-rw-r--r--  gfx/skia/skia/include/private/SkOpts_spi.h | 23
-rw-r--r--  gfx/skia/skia/include/private/SkPathRef.h | 539
-rw-r--r--  gfx/skia/skia/include/private/SkSLDefines.h | 64
-rw-r--r--  gfx/skia/skia/include/private/SkSLIRNode.h | 145
-rw-r--r--  gfx/skia/skia/include/private/SkSLLayout.h | 93
-rw-r--r--  gfx/skia/skia/include/private/SkSLModifiers.h | 178
-rw-r--r--  gfx/skia/skia/include/private/SkSLProgramElement.h | 41
-rw-r--r--  gfx/skia/skia/include/private/SkSLProgramKind.h | 36
-rw-r--r--  gfx/skia/skia/include/private/SkSLSampleUsage.h | 85
-rw-r--r--  gfx/skia/skia/include/private/SkSLStatement.h | 44
-rw-r--r--  gfx/skia/skia/include/private/SkSLString.h | 59
-rw-r--r--  gfx/skia/skia/include/private/SkSLSymbol.h | 63
-rw-r--r--  gfx/skia/skia/include/private/SkShadowFlags.h | 27
-rw-r--r--  gfx/skia/skia/include/private/SkSpinlock.h | 57
-rw-r--r--  gfx/skia/skia/include/private/SkWeakRefCnt.h | 173
-rw-r--r--  gfx/skia/skia/include/private/base/README.md | 4
-rw-r--r--  gfx/skia/skia/include/private/base/SingleOwner.h | 75
-rw-r--r--  gfx/skia/skia/include/private/base/SkAPI.h | 52
-rw-r--r--  gfx/skia/skia/include/private/base/SkAlign.h | 39
-rw-r--r--  gfx/skia/skia/include/private/base/SkAlignedStorage.h | 32
-rw-r--r--  gfx/skia/skia/include/private/base/SkAssert.h | 93
-rw-r--r--  gfx/skia/skia/include/private/base/SkAttributes.h | 89
-rw-r--r--  gfx/skia/skia/include/private/base/SkCPUTypes.h | 25
-rw-r--r--  gfx/skia/skia/include/private/base/SkContainers.h | 46
-rw-r--r--  gfx/skia/skia/include/private/base/SkDebug.h | 27
-rw-r--r--  gfx/skia/skia/include/private/base/SkDeque.h | 143
-rw-r--r--  gfx/skia/skia/include/private/base/SkFeatures.h | 151
-rw-r--r--  gfx/skia/skia/include/private/base/SkFixed.h | 143
-rw-r--r--  gfx/skia/skia/include/private/base/SkFloatBits.h | 90
-rw-r--r--  gfx/skia/skia/include/private/base/SkFloatingPoint.h | 247
-rw-r--r--  gfx/skia/skia/include/private/base/SkLoadUserConfig.h | 58
-rw-r--r--  gfx/skia/skia/include/private/base/SkMacros.h | 107
-rw-r--r--  gfx/skia/skia/include/private/base/SkMalloc.h | 144
-rw-r--r--  gfx/skia/skia/include/private/base/SkMath.h | 77
-rw-r--r--  gfx/skia/skia/include/private/base/SkMutex.h | 64
-rw-r--r--  gfx/skia/skia/include/private/base/SkNoncopyable.h | 30
-rw-r--r--  gfx/skia/skia/include/private/base/SkOnce.h | 55
-rw-r--r--  gfx/skia/skia/include/private/base/SkPathEnums.h | 25
-rw-r--r--  gfx/skia/skia/include/private/base/SkSafe32.h | 49
-rw-r--r--  gfx/skia/skia/include/private/base/SkSemaphore.h | 84
-rw-r--r--  gfx/skia/skia/include/private/base/SkSpan_impl.h | 129
-rw-r--r--  gfx/skia/skia/include/private/base/SkTArray.h | 696
-rw-r--r--  gfx/skia/skia/include/private/base/SkTDArray.h | 236
-rw-r--r--  gfx/skia/skia/include/private/base/SkTFitsIn.h | 105
-rw-r--r--  gfx/skia/skia/include/private/base/SkTLogic.h | 56
-rw-r--r--  gfx/skia/skia/include/private/base/SkTPin.h | 23
-rw-r--r--  gfx/skia/skia/include/private/base/SkTemplates.h | 426
-rw-r--r--  gfx/skia/skia/include/private/base/SkThreadAnnotations.h | 91
-rw-r--r--  gfx/skia/skia/include/private/base/SkThreadID.h | 23
-rw-r--r--  gfx/skia/skia/include/private/base/SkTo.h | 39
-rw-r--r--  gfx/skia/skia/include/private/base/SkTypeTraits.h | 33
-rw-r--r--  gfx/skia/skia/include/private/chromium/GrSlug.h | 16
-rw-r--r--  gfx/skia/skia/include/private/chromium/GrVkSecondaryCBDrawContext.h | 130
-rw-r--r--  gfx/skia/skia/include/private/chromium/SkChromeRemoteGlyphCache.h | 148
-rw-r--r--  gfx/skia/skia/include/private/chromium/SkDiscardableMemory.h | 70
-rw-r--r--  gfx/skia/skia/include/private/chromium/Slug.h | 67
-rw-r--r--  gfx/skia/skia/include/private/gpu/ganesh/GrContext_Base.h | 100
-rw-r--r--  gfx/skia/skia/include/private/gpu/ganesh/GrD3DTypesMinimal.h | 74
-rw-r--r--  gfx/skia/skia/include/private/gpu/ganesh/GrDawnTypesPriv.h | 26
-rw-r--r--  gfx/skia/skia/include/private/gpu/ganesh/GrGLTypesPriv.h | 108
-rw-r--r--  gfx/skia/skia/include/private/gpu/ganesh/GrImageContext.h | 55
-rw-r--r--  gfx/skia/skia/include/private/gpu/ganesh/GrMockTypesPriv.h | 32
-rw-r--r--  gfx/skia/skia/include/private/gpu/ganesh/GrMtlTypesPriv.h | 75
-rw-r--r--  gfx/skia/skia/include/private/gpu/ganesh/GrTypesPriv.h | 1042
-rw-r--r--  gfx/skia/skia/include/private/gpu/ganesh/GrVkTypesPriv.h | 73
-rw-r--r--  gfx/skia/skia/include/private/gpu/graphite/DawnTypesPriv.h | 38
-rw-r--r--  gfx/skia/skia/include/private/gpu/graphite/MtlGraphiteTypesPriv.h | 74
-rw-r--r--  gfx/skia/skia/include/private/gpu/graphite/VulkanGraphiteTypesPriv.h | 55
-rw-r--r--  gfx/skia/skia/include/private/gpu/vk/SkiaVulkan.h | 36
-rw-r--r--  gfx/skia/skia/include/private/gpu/vk/VulkanTypesPriv.h | 57
78 files changed, 8799 insertions, 0 deletions
diff --git a/gfx/skia/skia/include/private/SkBitmaskEnum.h b/gfx/skia/skia/include/private/SkBitmaskEnum.h
new file mode 100644
index 0000000000..b25045359d
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkBitmaskEnum.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkEnumOperators_DEFINED
+#define SkEnumOperators_DEFINED
+
+#include <type_traits>
+
+namespace sknonstd {
+template <typename T> struct is_bitmask_enum : std::false_type {};
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, bool> constexpr Any(E e) {
+ return static_cast<std::underlying_type_t<E>>(e) != 0;
+}
+} // namespace sknonstd
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator|(E l, E r) {
+ using U = std::underlying_type_t<E>;
+ return static_cast<E>(static_cast<U>(l) | static_cast<U>(r));
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E&> constexpr operator|=(E& l, E r) {
+ return l = l | r;
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator&(E l, E r) {
+ using U = std::underlying_type_t<E>;
+ return static_cast<E>(static_cast<U>(l) & static_cast<U>(r));
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E&> constexpr operator&=(E& l, E r) {
+ return l = l & r;
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator^(E l, E r) {
+ using U = std::underlying_type_t<E>;
+ return static_cast<E>(static_cast<U>(l) ^ static_cast<U>(r));
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E&> constexpr operator^=(E& l, E r) {
+ return l = l ^ r;
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator~(E e) {
+ return static_cast<E>(~static_cast<std::underlying_type_t<E>>(e));
+}
+
+#endif // SkEnumOperators_DEFINED
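
A minimal usage sketch of the operators above (the Flags enum is hypothetical, not part of the header): a scoped enum opts in by specializing sknonstd::is_bitmask_enum, after which the bitwise operators and sknonstd::Any become available.

// Hypothetical example: opting a scoped enum into the operators above.
enum class Flags : unsigned { kNone = 0, kBold = 1 << 0, kItalic = 1 << 1 };

namespace sknonstd {
template <> struct is_bitmask_enum<Flags> : std::true_type {};
}  // namespace sknonstd

void example() {
    Flags f = Flags::kBold | Flags::kItalic;  // operator| is now well-formed
    f &= ~Flags::kItalic;                     // clear a bit with operator~ / operator&=
    if (sknonstd::Any(f)) {                   // explicit truthiness test
        // at least one flag is set
    }
}
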
diff --git a/gfx/skia/skia/include/private/SkChecksum.h b/gfx/skia/skia/include/private/SkChecksum.h
new file mode 100644
index 0000000000..d36e726089
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkChecksum.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkChecksum_DEFINED
+#define SkChecksum_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkOpts_spi.h"
+#include "include/private/base/SkTLogic.h"
+
+#include <string>
+#include <string_view>
+
+class SkChecksum {
+public:
+ SkChecksum() = default;
+ // Make noncopyable
+ SkChecksum(const SkChecksum&) = delete;
+ SkChecksum& operator=(const SkChecksum&) = delete;
+
+ /**
+ * uint32_t -> uint32_t hash, useful for when you're about to truncate this hash but you
+ * suspect its low bits aren't well mixed.
+ *
+ * This is the Murmur3 finalizer.
+ */
+ static uint32_t Mix(uint32_t hash) {
+ hash ^= hash >> 16;
+ hash *= 0x85ebca6b;
+ hash ^= hash >> 13;
+ hash *= 0xc2b2ae35;
+ hash ^= hash >> 16;
+ return hash;
+ }
+
+ /**
+ * uint32_t -> uint32_t hash, useful for when you're about to truncate this hash but you
+ * suspect its low bits aren't well mixed.
+ *
+ * This version is two lines cheaper than Mix, but it seems to be sufficient for the font cache.
+ */
+ static uint32_t CheapMix(uint32_t hash) {
+ hash ^= hash >> 16;
+ hash *= 0x85ebca6b;
+ hash ^= hash >> 16;
+ return hash;
+ }
+};
+
+// SkGoodHash should usually be your first choice in hashing data.
+// It should be both reasonably fast and high quality.
+struct SkGoodHash {
+ template <typename K>
+ std::enable_if_t<sizeof(K) == 4, uint32_t> operator()(const K& k) const {
+ return SkChecksum::Mix(*(const uint32_t*)&k);
+ }
+
+ template <typename K>
+ std::enable_if_t<sizeof(K) != 4, uint32_t> operator()(const K& k) const {
+ return SkOpts::hash_fn(&k, sizeof(K), 0);
+ }
+
+ uint32_t operator()(const SkString& k) const {
+ return SkOpts::hash_fn(k.c_str(), k.size(), 0);
+ }
+
+ uint32_t operator()(const std::string& k) const {
+ return SkOpts::hash_fn(k.c_str(), k.size(), 0);
+ }
+
+ uint32_t operator()(std::string_view k) const {
+ return SkOpts::hash_fn(k.data(), k.size(), 0);
+ }
+};
+
+#endif
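
A short sketch of how SkGoodHash dispatches (the key structs are illustrative): keys of exactly four bytes are bit-cast and run through SkChecksum::Mix, while everything else is hashed byte-wise through SkOpts::hash_fn, which must be linked in.

#include <cstdint>
#include <string_view>

struct Key4  { int16_t x, y; };   // sizeof == 4 -> SkChecksum::Mix path
struct Key16 { int64_t x, y; };   // sizeof != 4 -> SkOpts::hash_fn path

uint32_t hashExamples() {
    SkGoodHash hash;
    uint32_t a = hash(Key4{1, 2});               // bit-casts the 4 bytes, then Mix()
    uint32_t b = hash(Key16{1, 2});              // hashes all 16 bytes with hash_fn
    uint32_t c = hash(std::string_view("key"));  // dedicated overload
    return a ^ b ^ c;
}
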
diff --git a/gfx/skia/skia/include/private/SkColorData.h b/gfx/skia/skia/include/private/SkColorData.h
new file mode 100644
index 0000000000..1bef596a36
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkColorData.h
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorData_DEFINED
+#define SkColorData_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorPriv.h"
+#include "include/private/base/SkTo.h"
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// Convert a 16bit pixel to a 32bit pixel
+
+#define SK_R16_BITS 5
+#define SK_G16_BITS 6
+#define SK_B16_BITS 5
+
+#define SK_R16_SHIFT (SK_B16_BITS + SK_G16_BITS)
+#define SK_G16_SHIFT (SK_B16_BITS)
+#define SK_B16_SHIFT 0
+
+#define SK_R16_MASK ((1 << SK_R16_BITS) - 1)
+#define SK_G16_MASK ((1 << SK_G16_BITS) - 1)
+#define SK_B16_MASK ((1 << SK_B16_BITS) - 1)
+
+#define SkGetPackedR16(color) (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
+#define SkGetPackedG16(color) (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
+#define SkGetPackedB16(color) (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)
+
+static inline unsigned SkR16ToR32(unsigned r) {
+ return (r << (8 - SK_R16_BITS)) | (r >> (2 * SK_R16_BITS - 8));
+}
+
+static inline unsigned SkG16ToG32(unsigned g) {
+ return (g << (8 - SK_G16_BITS)) | (g >> (2 * SK_G16_BITS - 8));
+}
+
+static inline unsigned SkB16ToB32(unsigned b) {
+ return (b << (8 - SK_B16_BITS)) | (b >> (2 * SK_B16_BITS - 8));
+}
+
+#define SkPacked16ToR32(c) SkR16ToR32(SkGetPackedR16(c))
+#define SkPacked16ToG32(c) SkG16ToG32(SkGetPackedG16(c))
+#define SkPacked16ToB32(c) SkB16ToB32(SkGetPackedB16(c))
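// A worked example of the bit replication above: widening copies a channel's
// top bits into the vacated low bits so that full scale maps to full scale.
// For 5-bit red r = 31 (0b11111):
//     r << (8 - 5)   == 0b11111000 (248)
//     r >> (2*5 - 8) == 0b00000111 (7)    -- top 3 bits recycled as low bits
//     OR'd together  == 0b11111111 (255)  -- 31 widens to 255, not just 248
static_assert((31u << 3 | 31u >> 2) == 255u, "5-bit full scale widens to 255");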
+
+//////////////////////////////////////////////////////////////////////////////
+
+#define SkASSERT_IS_BYTE(x) SkASSERT(0 == ((x) & ~0xFFu))
+
+// Reverse the bytes corresponding to RED and BLUE in a packed pixel. Note that
+// the pair of them occupy the same two slots in both RGBA and BGRA, so there is
+// no need to pass the colortype to this function.
+static inline uint32_t SkSwizzle_RB(uint32_t c) {
+ static const uint32_t kRBMask = (0xFF << SK_R32_SHIFT) | (0xFF << SK_B32_SHIFT);
+
+ unsigned c0 = (c >> SK_R32_SHIFT) & 0xFF;
+ unsigned c1 = (c >> SK_B32_SHIFT) & 0xFF;
+ return (c & ~kRBMask) | (c0 << SK_B32_SHIFT) | (c1 << SK_R32_SHIFT);
+}
+
+static inline uint32_t SkPackARGB_as_RGBA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkASSERT_IS_BYTE(a);
+ SkASSERT_IS_BYTE(r);
+ SkASSERT_IS_BYTE(g);
+ SkASSERT_IS_BYTE(b);
+ return (a << SK_RGBA_A32_SHIFT) | (r << SK_RGBA_R32_SHIFT) |
+ (g << SK_RGBA_G32_SHIFT) | (b << SK_RGBA_B32_SHIFT);
+}
+
+static inline uint32_t SkPackARGB_as_BGRA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkASSERT_IS_BYTE(a);
+ SkASSERT_IS_BYTE(r);
+ SkASSERT_IS_BYTE(g);
+ SkASSERT_IS_BYTE(b);
+ return (a << SK_BGRA_A32_SHIFT) | (r << SK_BGRA_R32_SHIFT) |
+ (g << SK_BGRA_G32_SHIFT) | (b << SK_BGRA_B32_SHIFT);
+}
+
+static inline SkPMColor SkSwizzle_RGBA_to_PMColor(uint32_t c) {
+#ifdef SK_PMCOLOR_IS_RGBA
+ return c;
+#else
+ return SkSwizzle_RB(c);
+#endif
+}
+
+static inline SkPMColor SkSwizzle_BGRA_to_PMColor(uint32_t c) {
+#ifdef SK_PMCOLOR_IS_BGRA
+ return c;
+#else
+ return SkSwizzle_RB(c);
+#endif
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+///@{
+/** See ITU-R Recommendation BT.709 at http://www.itu.int/rec/R-REC-BT.709/ .*/
+#define SK_ITU_BT709_LUM_COEFF_R (0.2126f)
+#define SK_ITU_BT709_LUM_COEFF_G (0.7152f)
+#define SK_ITU_BT709_LUM_COEFF_B (0.0722f)
+///@}
+
+///@{
+/** A float value which specifies this channel's contribution to luminance. */
+#define SK_LUM_COEFF_R SK_ITU_BT709_LUM_COEFF_R
+#define SK_LUM_COEFF_G SK_ITU_BT709_LUM_COEFF_G
+#define SK_LUM_COEFF_B SK_ITU_BT709_LUM_COEFF_B
+///@}
+
+/** Computes the luminance from the given r, g, and b in accordance with
+ SK_LUM_COEFF_X. For correct results, r, g, and b should be in linear space.
+*/
+static inline U8CPU SkComputeLuminance(U8CPU r, U8CPU g, U8CPU b) {
+ //The following is
+ //r * SK_LUM_COEFF_R + g * SK_LUM_COEFF_G + b * SK_LUM_COEFF_B
+ //with SK_LUM_COEFF_X in 1.8 fixed point (rounding adjusted to sum to 256).
+ return (r * 54 + g * 183 + b * 19) >> 8;
+}
+
+/** Calculates 256 - (value * alpha256) / 255 in range [0,256],
+ * for [0,255] value and [0,256] alpha256.
+ */
+static inline U16CPU SkAlphaMulInv256(U16CPU value, U16CPU alpha256) {
+ unsigned prod = 0xFFFF - value * alpha256;
+ return (prod + (prod >> 8)) >> 8;
+}
+
+// The caller may want negative values, so keep all params signed (int)
+// so we don't accidentally slip into unsigned math and lose the sign
+// extension when we shift (in SkAlphaMul)
+static inline int SkAlphaBlend(int src, int dst, int scale256) {
+ SkASSERT((unsigned)scale256 <= 256);
+ return dst + SkAlphaMul(src - dst, scale256);
+}
+
+static inline uint16_t SkPackRGB16(unsigned r, unsigned g, unsigned b) {
+ SkASSERT(r <= SK_R16_MASK);
+ SkASSERT(g <= SK_G16_MASK);
+ SkASSERT(b <= SK_B16_MASK);
+
+ return SkToU16((r << SK_R16_SHIFT) | (g << SK_G16_SHIFT) | (b << SK_B16_SHIFT));
+}
+
+#define SK_R16_MASK_IN_PLACE (SK_R16_MASK << SK_R16_SHIFT)
+#define SK_G16_MASK_IN_PLACE (SK_G16_MASK << SK_G16_SHIFT)
+#define SK_B16_MASK_IN_PLACE (SK_B16_MASK << SK_B16_SHIFT)
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Abstract 4-byte interpolation, implemented on top of SkPMColor
+ * utility functions. Third parameter controls blending of the first two:
+ * (src, dst, 0) returns dst
+ * (src, dst, 0xFF) returns src
+ * scale is [0..256], unlike SkFourByteInterp which takes [0..255]
+ */
+static inline SkPMColor SkFourByteInterp256(SkPMColor src, SkPMColor dst, int scale) {
+ unsigned a = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedA32(src), SkGetPackedA32(dst), scale));
+ unsigned r = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedR32(src), SkGetPackedR32(dst), scale));
+ unsigned g = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedG32(src), SkGetPackedG32(dst), scale));
+ unsigned b = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedB32(src), SkGetPackedB32(dst), scale));
+
+ return SkPackARGB32(a, r, g, b);
+}
+
+/**
+ * Abstract 4-byte interpolation, implemented on top of SkPMColor
+ * utility functions. Third parameter controls blending of the first two:
+ * (src, dst, 0) returns dst
+ * (src, dst, 0xFF) returns src
+ */
+static inline SkPMColor SkFourByteInterp(SkPMColor src, SkPMColor dst, U8CPU srcWeight) {
+ int scale = (int)SkAlpha255To256(srcWeight);
+ return SkFourByteInterp256(src, dst, scale);
+}
+
+/**
+ * 0xAARRGGBB -> 0x00AA00GG, 0x00RR00BB
+ */
+static inline void SkSplay(uint32_t color, uint32_t* ag, uint32_t* rb) {
+ const uint32_t mask = 0x00FF00FF;
+ *ag = (color >> 8) & mask;
+ *rb = color & mask;
+}
+
+/**
+ * 0xAARRGGBB -> 0x00AA00GG00RR00BB
+ * (note, ARGB -> AGRB)
+ */
+static inline uint64_t SkSplay(uint32_t color) {
+ const uint32_t mask = 0x00FF00FF;
+ uint64_t agrb = (color >> 8) & mask; // 0x0000000000AA00GG
+ agrb <<= 32; // 0x00AA00GG00000000
+ agrb |= color & mask; // 0x00AA00GG00RR00BB
+ return agrb;
+}
+
+/**
+ * 0xAAxxGGxx, 0xRRxxBBxx -> 0xAARRGGBB
+ */
+static inline uint32_t SkUnsplay(uint32_t ag, uint32_t rb) {
+ const uint32_t mask = 0xFF00FF00;
+ return (ag & mask) | ((rb & mask) >> 8);
+}
+
+/**
+ * 0xAAxxGGxxRRxxBBxx -> 0xAARRGGBB
+ * (note, AGRB -> ARGB)
+ */
+static inline uint32_t SkUnsplay(uint64_t agrb) {
+ const uint32_t mask = 0xFF00FF00;
+ return SkPMColor(
+ ((agrb & mask) >> 8) | // 0x00RR00BB
+ ((agrb >> 32) & mask)); // 0xAARRGGBB
+}
+
+static inline SkPMColor SkFastFourByteInterp256_32(SkPMColor src, SkPMColor dst, unsigned scale) {
+ SkASSERT(scale <= 256);
+
+ // Two 8-bit blends per two 32-bit registers, with space to make sure the math doesn't collide.
+ uint32_t src_ag, src_rb, dst_ag, dst_rb;
+ SkSplay(src, &src_ag, &src_rb);
+ SkSplay(dst, &dst_ag, &dst_rb);
+
+ const uint32_t ret_ag = src_ag * scale + (256 - scale) * dst_ag;
+ const uint32_t ret_rb = src_rb * scale + (256 - scale) * dst_rb;
+
+ return SkUnsplay(ret_ag, ret_rb);
+}
+
+static inline SkPMColor SkFastFourByteInterp256_64(SkPMColor src, SkPMColor dst, unsigned scale) {
+ SkASSERT(scale <= 256);
+ // Four 8-bit blends in one 64-bit register, with space to make sure the math doesn't collide.
+ return SkUnsplay(SkSplay(src) * scale + (256-scale) * SkSplay(dst));
+}
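// Worked example (illustrative values): lerp opaque white toward opaque black
// at scale 128. After SkSplay each 8-bit channel sits in its own 16-bit lane,
// and the largest per-lane intermediate is 255*scale + (256-scale)*255
// == 255*256 == 65280 < 65536, so lanes can never carry into each other.
//   SkSplay(0xFFFFFFFF): ag = 0x00FF00FF, rb = 0x00FF00FF
//   SkSplay(0xFF000000): ag = 0x00FF0000, rb = 0x00000000
//   ret_ag = 0x00FF00FF*128 + 128*0x00FF0000 = 0xFF007F80
//   ret_rb = 0x00FF00FF*128 + 128*0x00000000 = 0x7F807F80
//   SkUnsplay keeps each lane's high byte -> 0xFF7F7F7F: alpha stays 255 and
//   each color channel lands on 127 (truncated from 127.5).
// i.e. SkFastFourByteInterp256_32(0xFFFFFFFF, 0xFF000000, 128) == 0xFF7F7F7F.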
+
+// TODO(mtklein): Replace slow versions with fast versions, using scale + (scale>>7) everywhere.
+
+/**
+ * Same as SkFourByteInterp256, but faster.
+ */
+static inline SkPMColor SkFastFourByteInterp256(SkPMColor src, SkPMColor dst, unsigned scale) {
+ // On a 64-bit machine, _64 is about 10% faster than _32, but ~40% slower on a 32-bit machine.
+ if (sizeof(void*) == 4) {
+ return SkFastFourByteInterp256_32(src, dst, scale);
+ } else {
+ return SkFastFourByteInterp256_64(src, dst, scale);
+ }
+}
+
+/**
+ * Nearly the same as SkFourByteInterp, but faster and a touch more accurate, due to better
+ * srcWeight scaling to [0, 256].
+ */
+static inline SkPMColor SkFastFourByteInterp(SkPMColor src, SkPMColor dst, U8CPU srcWeight) {
+ SkASSERT(srcWeight <= 255);
+ // scale = srcWeight + (srcWeight >> 7) is more accurate than
+ // scale = srcWeight + 1, but 7% slower
+ return SkFastFourByteInterp256(src, dst, srcWeight + (srcWeight >> 7));
+}
+
+/**
+ * Interpolates between colors src and dst using [0,256] scale.
+ */
+static inline SkPMColor SkPMLerp(SkPMColor src, SkPMColor dst, unsigned scale) {
+ return SkFastFourByteInterp256(src, dst, scale);
+}
+
+static inline SkPMColor SkBlendARGB32(SkPMColor src, SkPMColor dst, U8CPU aa) {
+ SkASSERT((unsigned)aa <= 255);
+
+ unsigned src_scale = SkAlpha255To256(aa);
+ unsigned dst_scale = SkAlphaMulInv256(SkGetPackedA32(src), src_scale);
+
+ const uint32_t mask = 0xFF00FF;
+
+ uint32_t src_rb = (src & mask) * src_scale;
+ uint32_t src_ag = ((src >> 8) & mask) * src_scale;
+
+ uint32_t dst_rb = (dst & mask) * dst_scale;
+ uint32_t dst_ag = ((dst >> 8) & mask) * dst_scale;
+
+ return (((src_rb + dst_rb) >> 8) & mask) | ((src_ag + dst_ag) & ~mask);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// Convert a 32bit pixel to a 16bit pixel (no dither)
+
+#define SkR32ToR16_MACRO(r) ((unsigned)(r) >> (SK_R32_BITS - SK_R16_BITS))
+#define SkG32ToG16_MACRO(g) ((unsigned)(g) >> (SK_G32_BITS - SK_G16_BITS))
+#define SkB32ToB16_MACRO(b) ((unsigned)(b) >> (SK_B32_BITS - SK_B16_BITS))
+
+#ifdef SK_DEBUG
+ static inline unsigned SkR32ToR16(unsigned r) {
+ SkR32Assert(r);
+ return SkR32ToR16_MACRO(r);
+ }
+ static inline unsigned SkG32ToG16(unsigned g) {
+ SkG32Assert(g);
+ return SkG32ToG16_MACRO(g);
+ }
+ static inline unsigned SkB32ToB16(unsigned b) {
+ SkB32Assert(b);
+ return SkB32ToB16_MACRO(b);
+ }
+#else
+ #define SkR32ToR16(r) SkR32ToR16_MACRO(r)
+ #define SkG32ToG16(g) SkG32ToG16_MACRO(g)
+ #define SkB32ToB16(b) SkB32ToB16_MACRO(b)
+#endif
+
+static inline U16CPU SkPixel32ToPixel16(SkPMColor c) {
+ unsigned r = ((c >> (SK_R32_SHIFT + (8 - SK_R16_BITS))) & SK_R16_MASK) << SK_R16_SHIFT;
+ unsigned g = ((c >> (SK_G32_SHIFT + (8 - SK_G16_BITS))) & SK_G16_MASK) << SK_G16_SHIFT;
+ unsigned b = ((c >> (SK_B32_SHIFT + (8 - SK_B16_BITS))) & SK_B16_MASK) << SK_B16_SHIFT;
+ return r | g | b;
+}
+
+static inline U16CPU SkPack888ToRGB16(U8CPU r, U8CPU g, U8CPU b) {
+ return (SkR32ToR16(r) << SK_R16_SHIFT) |
+ (SkG32ToG16(g) << SK_G16_SHIFT) |
+ (SkB32ToB16(b) << SK_B16_SHIFT);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+static inline SkColor SkPixel16ToColor(U16CPU src) {
+ SkASSERT(src == SkToU16(src));
+
+ unsigned r = SkPacked16ToR32(src);
+ unsigned g = SkPacked16ToG32(src);
+ unsigned b = SkPacked16ToB32(src);
+
+ SkASSERT((r >> (8 - SK_R16_BITS)) == SkGetPackedR16(src));
+ SkASSERT((g >> (8 - SK_G16_BITS)) == SkGetPackedG16(src));
+ SkASSERT((b >> (8 - SK_B16_BITS)) == SkGetPackedB16(src));
+
+ return SkColorSetRGB(r, g, b);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef uint16_t SkPMColor16;
+
+// Put in OpenGL order (r g b a)
+#define SK_A4444_SHIFT 0
+#define SK_R4444_SHIFT 12
+#define SK_G4444_SHIFT 8
+#define SK_B4444_SHIFT 4
+
+static inline U8CPU SkReplicateNibble(unsigned nib) {
+ SkASSERT(nib <= 0xF);
+ return (nib << 4) | nib;
+}
+
+#define SkGetPackedA4444(c) (((unsigned)(c) >> SK_A4444_SHIFT) & 0xF)
+#define SkGetPackedR4444(c) (((unsigned)(c) >> SK_R4444_SHIFT) & 0xF)
+#define SkGetPackedG4444(c) (((unsigned)(c) >> SK_G4444_SHIFT) & 0xF)
+#define SkGetPackedB4444(c) (((unsigned)(c) >> SK_B4444_SHIFT) & 0xF)
+
+#define SkPacked4444ToA32(c) SkReplicateNibble(SkGetPackedA4444(c))
+
+static inline SkPMColor SkPixel4444ToPixel32(U16CPU c) {
+ uint32_t d = (SkGetPackedA4444(c) << SK_A32_SHIFT) |
+ (SkGetPackedR4444(c) << SK_R32_SHIFT) |
+ (SkGetPackedG4444(c) << SK_G32_SHIFT) |
+ (SkGetPackedB4444(c) << SK_B32_SHIFT);
+ return d | (d << 4);
+}
+
+using SkPMColor4f = SkRGBA4f<kPremul_SkAlphaType>;
+
+constexpr SkPMColor4f SK_PMColor4fTRANSPARENT = { 0, 0, 0, 0 };
+constexpr SkPMColor4f SK_PMColor4fBLACK = { 0, 0, 0, 1 };
+constexpr SkPMColor4f SK_PMColor4fWHITE = { 1, 1, 1, 1 };
+constexpr SkPMColor4f SK_PMColor4fILLEGAL = { SK_FloatNegativeInfinity,
+ SK_FloatNegativeInfinity,
+ SK_FloatNegativeInfinity,
+ SK_FloatNegativeInfinity };
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkEncodedInfo.h b/gfx/skia/skia/include/private/SkEncodedInfo.h
new file mode 100644
index 0000000000..74e2ad1480
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkEncodedInfo.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEncodedInfo_DEFINED
+#define SkEncodedInfo_DEFINED
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "modules/skcms/skcms.h"
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+struct SkEncodedInfo {
+public:
+ class ICCProfile {
+ public:
+ static std::unique_ptr<ICCProfile> Make(sk_sp<SkData>);
+ static std::unique_ptr<ICCProfile> Make(const skcms_ICCProfile&);
+
+ const skcms_ICCProfile* profile() const { return &fProfile; }
+ private:
+ ICCProfile(const skcms_ICCProfile&, sk_sp<SkData> = nullptr);
+
+ skcms_ICCProfile fProfile;
+ sk_sp<SkData> fData;
+ };
+
+ enum Alpha {
+ kOpaque_Alpha,
+ kUnpremul_Alpha,
+
+ // Each pixel is either fully opaque or fully transparent.
+ // There is no difference between requesting kPremul or kUnpremul.
+ kBinary_Alpha,
+ };
+
+ /*
+ * We strive to make the number of components per pixel obvious through
+ * our naming conventions.
+ * Ex: kRGB has 3 components. kRGBA has 4 components.
+ *
+ * This sometimes results in redundant Alpha and Color information.
+ * Ex: kRGB images must also be kOpaque.
+ */
+ enum Color {
+ // PNG, WBMP
+ kGray_Color,
+
+ // PNG
+ kGrayAlpha_Color,
+
+ // PNG with Skia-specific sBIT
+ // Like kGrayAlpha, except this expects to be treated as
+ // kAlpha_8_SkColorType, which ignores the gray component. If
+ // decoded to full color (e.g. kN32), the gray component is respected
+ // (so it can share code with kGrayAlpha).
+ kXAlpha_Color,
+
+ // PNG
+ // 565 images may be encoded to PNG by specifying the number of
+ // significant bits for each channel. This is a strange 565
+ // representation because the image is still encoded with 8 bits per
+ // component.
+ k565_Color,
+
+ // PNG, GIF, BMP
+ kPalette_Color,
+
+ // PNG, RAW
+ kRGB_Color,
+ kRGBA_Color,
+
+ // BMP
+ kBGR_Color,
+ kBGRX_Color,
+ kBGRA_Color,
+
+ // JPEG, WEBP
+ kYUV_Color,
+
+ // WEBP
+ kYUVA_Color,
+
+ // JPEG
+ // Photoshop actually writes inverted CMYK data into JPEGs, where zero
+ // represents 100% ink coverage. For this reason, we treat CMYK JPEGs
+ // as having inverted CMYK. libjpeg-turbo warns that this may break
+ // other applications, but the CMYK JPEGs we see on the web expect to
+ // be treated as inverted CMYK.
+ kInvertedCMYK_Color,
+ kYCCK_Color,
+ };
+
+ static SkEncodedInfo Make(int width, int height, Color color, Alpha alpha,
+ int bitsPerComponent) {
+ return Make(width, height, color, alpha, bitsPerComponent, nullptr);
+ }
+
+ static SkEncodedInfo Make(int width, int height, Color color,
+ Alpha alpha, int bitsPerComponent, std::unique_ptr<ICCProfile> profile) {
+ return Make(width, height, color, alpha, /*bitsPerComponent*/ bitsPerComponent,
+ std::move(profile), /*colorDepth*/ bitsPerComponent);
+ }
+
+ static SkEncodedInfo Make(int width, int height, Color color,
+ Alpha alpha, int bitsPerComponent, std::unique_ptr<ICCProfile> profile,
+ int colorDepth) {
+ SkASSERT(1 == bitsPerComponent ||
+ 2 == bitsPerComponent ||
+ 4 == bitsPerComponent ||
+ 8 == bitsPerComponent ||
+ 16 == bitsPerComponent);
+
+ switch (color) {
+ case kGray_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ break;
+ case kGrayAlpha_Color:
+ SkASSERT(kOpaque_Alpha != alpha);
+ break;
+ case kPalette_Color:
+ SkASSERT(16 != bitsPerComponent);
+ break;
+ case kRGB_Color:
+ case kBGR_Color:
+ case kBGRX_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ SkASSERT(bitsPerComponent >= 8);
+ break;
+ case kYUV_Color:
+ case kInvertedCMYK_Color:
+ case kYCCK_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ case kRGBA_Color:
+ SkASSERT(bitsPerComponent >= 8);
+ break;
+ case kBGRA_Color:
+ case kYUVA_Color:
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ case kXAlpha_Color:
+ SkASSERT(kUnpremul_Alpha == alpha);
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ case k565_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+
+ return SkEncodedInfo(width, height, color, alpha,
+ bitsPerComponent, colorDepth, std::move(profile));
+ }
+
+ /*
+ * Returns a recommended SkImageInfo.
+ *
+ * TODO: Leave this up to the client.
+ */
+ SkImageInfo makeImageInfo() const {
+ auto ct = kGray_Color == fColor ? kGray_8_SkColorType :
+ kXAlpha_Color == fColor ? kAlpha_8_SkColorType :
+ k565_Color == fColor ? kRGB_565_SkColorType :
+ kN32_SkColorType ;
+ auto alpha = kOpaque_Alpha == fAlpha ? kOpaque_SkAlphaType
+ : kUnpremul_SkAlphaType;
+ sk_sp<SkColorSpace> cs = fProfile ? SkColorSpace::Make(*fProfile->profile())
+ : nullptr;
+ if (!cs) {
+ cs = SkColorSpace::MakeSRGB();
+ }
+ return SkImageInfo::Make(fWidth, fHeight, ct, alpha, std::move(cs));
+ }
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ Color color() const { return fColor; }
+ Alpha alpha() const { return fAlpha; }
+ bool opaque() const { return fAlpha == kOpaque_Alpha; }
+ const skcms_ICCProfile* profile() const {
+ if (!fProfile) return nullptr;
+ return fProfile->profile();
+ }
+
+ uint8_t bitsPerComponent() const { return fBitsPerComponent; }
+
+ uint8_t bitsPerPixel() const {
+ switch (fColor) {
+ case kGray_Color:
+ return fBitsPerComponent;
+ case kXAlpha_Color:
+ case kGrayAlpha_Color:
+ return 2 * fBitsPerComponent;
+ case kPalette_Color:
+ return fBitsPerComponent;
+ case kRGB_Color:
+ case kBGR_Color:
+ case kYUV_Color:
+ case k565_Color:
+ return 3 * fBitsPerComponent;
+ case kRGBA_Color:
+ case kBGRA_Color:
+ case kBGRX_Color:
+ case kYUVA_Color:
+ case kInvertedCMYK_Color:
+ case kYCCK_Color:
+ return 4 * fBitsPerComponent;
+ default:
+ SkASSERT(false);
+ return 0;
+ }
+ }
+
+ SkEncodedInfo(const SkEncodedInfo& orig) = delete;
+ SkEncodedInfo& operator=(const SkEncodedInfo&) = delete;
+
+ SkEncodedInfo(SkEncodedInfo&& orig) = default;
+ SkEncodedInfo& operator=(SkEncodedInfo&&) = default;
+
+ // Explicit copy method, to avoid accidental copying.
+ SkEncodedInfo copy() const {
+ auto copy = SkEncodedInfo::Make(
+ fWidth, fHeight, fColor, fAlpha, fBitsPerComponent, nullptr, fColorDepth);
+ if (fProfile) {
+ copy.fProfile = std::make_unique<ICCProfile>(*fProfile);
+ }
+ return copy;
+ }
+
+ // Return number of bits of R/G/B channel
+ uint8_t getColorDepth() const {
+ return fColorDepth;
+ }
+
+private:
+ SkEncodedInfo(int width, int height, Color color, Alpha alpha,
+ uint8_t bitsPerComponent, uint8_t colorDepth, std::unique_ptr<ICCProfile> profile)
+ : fWidth(width)
+ , fHeight(height)
+ , fColor(color)
+ , fAlpha(alpha)
+ , fBitsPerComponent(bitsPerComponent)
+ , fColorDepth(colorDepth)
+ , fProfile(std::move(profile))
+ {}
+
+ int fWidth;
+ int fHeight;
+ Color fColor;
+ Alpha fAlpha;
+ uint8_t fBitsPerComponent;
+ uint8_t fColorDepth;
+ std::unique_ptr<ICCProfile> fProfile;
+};
+
+#endif
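
A sketch of how a codec might describe its stream (the dimensions are illustrative; real call sites live in Skia's codec implementations): describe an 8-bit unpremultiplied RGBA image, then derive a recommended SkImageInfo from it.

// Illustrative sketch: an 800x600 image with 8-bit RGBA pixels.
SkEncodedInfo info = SkEncodedInfo::Make(800, 600,
                                         SkEncodedInfo::kRGBA_Color,
                                         SkEncodedInfo::kUnpremul_Alpha,
                                         /*bitsPerComponent=*/8);
SkASSERT(info.bitsPerPixel() == 32);           // 4 components * 8 bits
SkImageInfo imageInfo = info.makeImageInfo();  // kN32, kUnpremul, sRGB fallback
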
diff --git a/gfx/skia/skia/include/private/SkGainmapInfo.h b/gfx/skia/skia/include/private/SkGainmapInfo.h
new file mode 100644
index 0000000000..d477371188
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkGainmapInfo.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2023 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGainmapInfo_DEFINED
+#define SkGainmapInfo_DEFINED
+
+#include "include/core/SkColor.h"
+
+/**
+ * Gainmap rendering parameters. Suppose our display has HDR to SDR ratio of H and we wish to
+ * display an image with gainmap on this display. Let B be the pixel value from the base image
+ * in a color space that has the primaries of the base image and a linear transfer function. Let
+ * G be the pixel value from the gainmap. Let D be the output pixel in the same color space as B.
+ * The value of D is computed as follows:
+ *
+ * First, let W be a weight parameter determining how much the gainmap will be applied.
+ *   W = clamp((log(H) - log(fDisplayRatioSdr)) /
+ *             (log(fDisplayRatioHdr) - log(fDisplayRatioSdr)), 0, 1)
+ *
+ * Next, let L be the gainmap value in log space. We compute this from the value G that was
+ * sampled from the texture as follows:
+ * L = mix(log(fGainmapRatioMin), log(fGainmapRatioMax), pow(G, fGainmapGamma))
+ *
+ * Finally, apply the gainmap to compute D, the displayed pixel. If the base image is SDR then
+ * compute:
+ * D = (B + fEpsilonSdr) * exp(L * W) - fEpsilonHdr
+ * If the base image is HDR then compute:
+ * D = (B + fEpsilonHdr) * exp(L * (W - 1)) - fEpsilonSdr
+ *
+ * In the above math, log() is a natural logarithm and exp() is natural exponentiation. Note,
+ * however, that the base used for the log() and exp() functions does not affect the results of
+ * the computation (it cancels out, as long as the same base is used throughout).
+ */
+struct SkGainmapInfo {
+ /**
+ * Parameters for converting the gainmap from its image encoding to log space. These are
+ * specified per color channel. The alpha value is unused.
+ */
+ SkColor4f fGainmapRatioMin = {1.f, 1.f, 1.f, 1.0};
+ SkColor4f fGainmapRatioMax = {2.f, 2.f, 2.f, 1.0};
+ SkColor4f fGainmapGamma = {1.f, 1.f, 1.f, 1.f};
+
+ /**
+ * Parameters sometimes used in gainmap computation to avoid numerical instability.
+ */
+ SkColor4f fEpsilonSdr = {0.f, 0.f, 0.f, 1.0};
+ SkColor4f fEpsilonHdr = {0.f, 0.f, 0.f, 1.0};
+
+ /**
+ * If the output display's HDR to SDR ratio is less than or equal to fDisplayRatioSdr, then the
+ * SDR rendition is displayed. If the output display's HDR to SDR ratio is greater than or equal
+ * to fDisplayRatioHdr, then the HDR rendition is displayed. If the output display's HDR to SDR
+ * ratio is between these values then an interpolation between the two is displayed using the
+ * math above.
+ */
+ float fDisplayRatioSdr = 1.f;
+ float fDisplayRatioHdr = 2.f;
+
+ /**
+ * Whether the base image is the SDR image or the HDR image.
+ */
+ enum class BaseImageType {
+ kSDR,
+ kHDR,
+ };
+ BaseImageType fBaseImageType = BaseImageType::kSDR;
+
+ // TODO(ccameron): Remove these parameters after the new parameters roll into Android.
+ SkColor4f fLogRatioMin = {0.f, 0.f, 0.f, 1.0};
+ SkColor4f fLogRatioMax = {1.f, 1.f, 1.f, 1.0};
+ float fHdrRatioMin = 1.f;
+ float fHdrRatioMax = 50.f;
+
+ /**
+ * The type of file that created this gainmap.
+ */
+ enum class Type {
+ kUnknown,
+ kMultiPicture,
+ kJpegR_Linear,
+ kJpegR_HLG,
+ kJpegR_PQ,
+ kHDRGM,
+ };
+ Type fType = Type::kUnknown;
+};
+
+#endif
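
A sketch transcribing the math above into scalar C++ for one color channel (the helper is hypothetical; production code evaluates this per channel, typically on the GPU). B is the base pixel value in linear space, G the sampled gainmap value in [0, 1], H the display's HDR to SDR ratio, and c a channel index in 0..2. This assumes an SDR base image; an HDR base would use exp(L * (W - 1)) and swap the epsilons, per the comment above.

#include <algorithm>
#include <cmath>

static float ApplyGainmapChannel(float B, float G, float H,
                                 const SkGainmapInfo& gi, int c) {
    // W: how much of the gainmap to apply, based on the display's capability.
    const float W = std::clamp(
            (std::log(H) - std::log(gi.fDisplayRatioSdr)) /
            (std::log(gi.fDisplayRatioHdr) - std::log(gi.fDisplayRatioSdr)),
            0.f, 1.f);
    // L: the gainmap value in log space, mix(log(min), log(max), pow(G, gamma)).
    const float logMin = std::log(gi.fGainmapRatioMin[c]);
    const float logMax = std::log(gi.fGainmapRatioMax[c]);
    const float L = logMin + (logMax - logMin) * std::pow(G, gi.fGainmapGamma[c]);
    // D: the displayed pixel, for an SDR base image.
    return (B + gi.fEpsilonSdr[c]) * std::exp(L * W) - gi.fEpsilonHdr[c];
}
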
diff --git a/gfx/skia/skia/include/private/SkGainmapShader.h b/gfx/skia/skia/include/private/SkGainmapShader.h
new file mode 100644
index 0000000000..f490ab96a4
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkGainmapShader.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2023 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGainmapShader_DEFINED
+#define SkGainmapShader_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+class SkColorSpace;
+class SkShader;
+class SkImage;
+struct SkGainmapInfo;
+struct SkRect;
+struct SkSamplingOptions;
+
+/**
+ * A gainmap shader will apply a gainmap to a base image using the math described alongside the
+ * definition of SkGainmapInfo.
+ */
+class SK_API SkGainmapShader {
+public:
+ /**
+ * Make a gainmap shader.
+ *
+ * When sampling the base image baseImage, the rectangle baseRect will be sampled to map to
+ * the rectangle dstRect. Sampling will be done according to baseSamplingOptions.
+ *
+ * When sampling the gainmap image gainmapImage, the rectangle gainmapRect will be sampled to
+ * map to the rectangle dstRect. Sampling will be done according to gainmapSamplingOptions.
+ *
+ * The gainmap will be applied according to the HDR to SDR ratio specified in dstHdrRatio.
+ *
+ * This shader must know the color space of the canvas that it will be rendered to. This color
+ * space must be specified in dstColorSpace.
+ * TODO(ccameron): Remove the need for dstColorSpace.
+ */
+ static sk_sp<SkShader> Make(const sk_sp<const SkImage>& baseImage,
+ const SkRect& baseRect,
+ const SkSamplingOptions& baseSamplingOptions,
+ const sk_sp<const SkImage>& gainmapImage,
+ const SkRect& gainmapRect,
+ const SkSamplingOptions& gainmapSamplingOptions,
+ const SkGainmapInfo& gainmapInfo,
+ const SkRect& dstRect,
+ float dstHdrRatio,
+ sk_sp<SkColorSpace> dstColorSpace);
+};
+
+#endif
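
A hypothetical usage sketch (all parameters are assumed to be supplied by the caller; includes for SkCanvas, SkPaint, etc. are omitted): build the shader over the full bounds of both images and fill the destination rectangle with it.

void drawWithGainmap(SkCanvas* canvas,
                     const sk_sp<const SkImage>& baseImage,
                     const sk_sp<const SkImage>& gainmapImage,
                     const SkGainmapInfo& gainmapInfo,
                     const SkRect& dstRect,
                     float displayHdrRatio,
                     sk_sp<SkColorSpace> dstColorSpace) {
    sk_sp<SkShader> shader = SkGainmapShader::Make(
            baseImage, SkRect::Make(baseImage->bounds()), SkSamplingOptions(),
            gainmapImage, SkRect::Make(gainmapImage->bounds()), SkSamplingOptions(),
            gainmapInfo, dstRect, displayHdrRatio, std::move(dstColorSpace));
    SkPaint paint;
    paint.setShader(std::move(shader));
    canvas->drawRect(dstRect, paint);
}
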
diff --git a/gfx/skia/skia/include/private/SkIDChangeListener.h b/gfx/skia/skia/include/private/SkIDChangeListener.h
new file mode 100644
index 0000000000..a32dae1a5a
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkIDChangeListener.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkIDChangeListener_DEFINED
+#define SkIDChangeListener_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkThreadAnnotations.h"
+
+#include <atomic>
+
+/**
+ * Used to be notified when a gen/unique ID is invalidated, typically to preemptively purge
+ * associated items from a cache that are no longer reachable. The listener can
+ * be marked for deregistration if the cached item is removed before the listener is
+ * triggered. This prevents unbounded listener growth when cache items are routinely
+ * removed before the gen ID/unique ID is invalidated.
+ */
+class SkIDChangeListener : public SkRefCnt {
+public:
+ SkIDChangeListener();
+
+ ~SkIDChangeListener() override;
+
+ virtual void changed() = 0;
+
+ /**
+ * Marks the listener as no longer needed: it should be removed and changed() should not be
+ * called.
+ */
+ void markShouldDeregister() { fShouldDeregister.store(true, std::memory_order_relaxed); }
+
+ /** Indicates whether markShouldDeregister was called. */
+ bool shouldDeregister() { return fShouldDeregister.load(std::memory_order_acquire); }
+
+ /** Manages a list of SkIDChangeListeners. */
+ class List {
+ public:
+ List();
+
+ ~List();
+
+ /**
+ * Add a new listener to the list. It must not already be deregistered. Also clears out
+ * previously deregistered listeners.
+ */
+ void add(sk_sp<SkIDChangeListener> listener) SK_EXCLUDES(fMutex);
+
+ /**
+ * The number of registered listeners (including deregistered listeners that are yet to be
+ * removed).
+ */
+ int count() const SK_EXCLUDES(fMutex);
+
+ /** Calls changed() on all listeners that haven't been deregistered and resets the list. */
+ void changed() SK_EXCLUDES(fMutex);
+
+ /** Resets without calling changed() on the listeners. */
+ void reset() SK_EXCLUDES(fMutex);
+
+ private:
+ mutable SkMutex fMutex;
+ SkSTArray<1, sk_sp<SkIDChangeListener>> fListeners SK_GUARDED_BY(fMutex);
+ };
+
+private:
+ std::atomic<bool> fShouldDeregister;
+};
+
+#endif
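
A sketch of the intended pattern (MyCache and its purge() are hypothetical): a cache registers a listener against an object's gen/unique ID and purges the matching entry when notified; if the cache evicts the entry first, it marks the listener for deregistration instead.

// Hypothetical sketch of the purge-on-invalidate pattern described above.
struct MyCache {
    void purge(uint32_t key) { /* drop the cached entry for 'key' */ }
};

class PurgeEntryListener : public SkIDChangeListener {
public:
    PurgeEntryListener(MyCache* cache, uint32_t key) : fCache(cache), fKey(key) {}

    // Invoked via List::changed() when the watched ID is invalidated.
    void changed() override { fCache->purge(fKey); }

private:
    MyCache* fCache;  // assumed to outlive the listener
    uint32_t fKey;
};

// If the cache drops the entry on its own, it calls markShouldDeregister() on
// the listener so the List can discard it without ever invoking changed().
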
diff --git a/gfx/skia/skia/include/private/SkJpegGainmapEncoder.h b/gfx/skia/skia/include/private/SkJpegGainmapEncoder.h
new file mode 100644
index 0000000000..756de78b23
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkJpegGainmapEncoder.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2023 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJpegGainmapEncoder_DEFINED
+#define SkJpegGainmapEncoder_DEFINED
+
+#include "include/encode/SkJpegEncoder.h"
+
+class SkPixmap;
+class SkWStream;
+struct SkGainmapInfo;
+
+class SK_API SkJpegGainmapEncoder {
+public:
+ /**
+ * Encode a JpegR image to |dst|.
+ *
+ * The base image is specified by |base|, and |baseOptions| controls the encoding behavior for
+ * the base image.
+ *
+ * The gainmap image is specified by |gainmap|, and |gainmapOptions| controls the encoding
+ * behavior for the gainmap image.
+ *
+ * The rendering behavior of the gainmap image is provided in |gainmapInfo|. Not all gainmap
+ * based images are compatible with JpegR. If the image is not compatible with JpegR, then
+ * the gainmap is converted to a format compatible with JpegR. This conversion may result in
+ * less precise quantization of the gainmap image.
+ *
+ * Returns true on success. Returns false on an invalid or unsupported |src|.
+ */
+ static bool EncodeJpegR(SkWStream* dst,
+ const SkPixmap& base,
+ const SkJpegEncoder::Options& baseOptions,
+ const SkPixmap& gainmap,
+ const SkJpegEncoder::Options& gainmapOptions,
+ const SkGainmapInfo& gainmapInfo);
+
+ /**
+ * Encode an HDRGM image to |dst|.
+ *
+ * The base image is specified by |base|, and |baseOptions| controls the encoding behavior for
+ * the base image.
+ *
+ * The gainmap image is specified by |gainmap|, and |gainmapOptions| controls the encoding
+ * behavior for the gainmap image.
+ *
+ * The rendering behavior of the gainmap image is provided in |gainmapInfo|.
+ *
+ * If |baseOptions| or |gainmapOptions| specify XMP metadata, then that metadata will be
+ * overwritten.
+ *
+ * Returns true on success. Returns false on an invalid or unsupported |src|.
+ */
+ static bool EncodeHDRGM(SkWStream* dst,
+ const SkPixmap& base,
+ const SkJpegEncoder::Options& baseOptions,
+ const SkPixmap& gainmap,
+ const SkJpegEncoder::Options& gainmapOptions,
+ const SkGainmapInfo& gainmapInfo);
+
+ /**
+ * Write a Multi Picture Format containing the |imageCount| images specified by |images|.
+ */
+ static bool MakeMPF(SkWStream* dst, const SkData** images, size_t imageCount);
+};
+
+#endif
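
A hypothetical usage sketch (basePixmap, gainmapPixmap, and gainmapInfo are assumed to exist; SkFILEWStream comes from SkStream.h): write an HDRGM-style JPEG from a base/gainmap pixmap pair.

bool writeHdrgmJpeg(const SkPixmap& basePixmap,
                    const SkPixmap& gainmapPixmap,
                    const SkGainmapInfo& gainmapInfo) {
    SkFILEWStream stream("out.jpg");      // destination file
    SkJpegEncoder::Options baseOptions;   // defaults (quality 100)
    SkJpegEncoder::Options gainmapOptions;
    gainmapOptions.fQuality = 85;         // gainmaps tolerate lossier encoding
    // Any XMP metadata in the options is overwritten, per the comment above.
    return SkJpegGainmapEncoder::EncodeHDRGM(&stream,
                                             basePixmap, baseOptions,
                                             gainmapPixmap, gainmapOptions,
                                             gainmapInfo);
}
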
diff --git a/gfx/skia/skia/include/private/SkOpts_spi.h b/gfx/skia/skia/include/private/SkOpts_spi.h
new file mode 100644
index 0000000000..6e888b77c8
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkOpts_spi.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOpts_spi_DEFINED
+#define SkOpts_spi_DEFINED
+
+#include "include/private/base/SkAPI.h"
+
+#include <cstddef>
+
+// These are exposed as SK_SPI (e.g. SkParagraph), the rest of SkOpts is
+// declared in src/core
+
+namespace SkOpts {
+ // The fastest high quality 32-bit hash we can provide on this platform.
+ extern uint32_t SK_SPI (*hash_fn)(const void* data, size_t bytes, uint32_t seed);
+} // namespace SkOpts
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkPathRef.h b/gfx/skia/skia/include/private/SkPathRef.h
new file mode 100644
index 0000000000..5e48086d35
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkPathRef.h
@@ -0,0 +1,539 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathRef_DEFINED
+#define SkPathRef_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkIDChangeListener.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTo.h"
+
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <tuple>
+#include <utility>
+
+class SkMatrix;
+class SkRRect;
+
+// These are computed from a stream of verbs
+struct SkPathVerbAnalysis {
+ bool valid;
+ int points, weights;
+ unsigned segmentMask;
+};
+SkPathVerbAnalysis sk_path_analyze_verbs(const uint8_t verbs[], int count);
+
+
+/**
+ * Holds the path verbs and points. It is versioned by a generation ID. None of its public methods
+ * modify the contents. To modify or append to the verbs/points wrap the SkPathRef in an
+ * SkPathRef::Editor object. Installing the editor resets the generation ID. It also performs
+ * copy-on-write if the SkPathRef is shared by multiple SkPaths. The caller passes the Editor's
+ * constructor a pointer to a sk_sp<SkPathRef>, which may be updated to point to a new SkPathRef
+ * after the editor's constructor returns.
+ *
+ * The points and verbs are stored in a single allocation. The points are at the beginning of the
+ * allocation while the verbs are stored at the end of the allocation, in reverse order. Thus the points
+ * and verbs both grow into the middle of the allocation until they meet. To access verb i in the
+ * verb array use ref.verbs()[~i] (because verbs() returns a pointer just beyond the first
+ * logical verb or the last verb in memory).
+ */
+
+class SK_API SkPathRef final : public SkNVRefCnt<SkPathRef> {
+public:
+ // See https://bugs.chromium.org/p/skia/issues/detail?id=13817 for how these sizes were
+ // determined.
+ using PointsArray = SkSTArray<4, SkPoint>;
+ using VerbsArray = SkSTArray<4, uint8_t>;
+ using ConicWeightsArray = SkSTArray<2, SkScalar>;
+
+ SkPathRef(PointsArray points, VerbsArray verbs, ConicWeightsArray weights,
+ unsigned segmentMask)
+ : fPoints(std::move(points))
+ , fVerbs(std::move(verbs))
+ , fConicWeights(std::move(weights))
+ {
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fGenerationID = 0; // recompute
+ fSegmentMask = segmentMask;
+ fIsOval = false;
+ fIsRRect = false;
+ // The next two values don't matter unless fIsOval or fIsRRect are true.
+ fRRectOrOvalIsCCW = false;
+ fRRectOrOvalStartIdx = 0xAC;
+ SkDEBUGCODE(fEditorsAttached.store(0);)
+
+ this->computeBounds(); // do this now, before we worry about multiple owners/threads
+ SkDEBUGCODE(this->validate();)
+ }
+
+ class Editor {
+ public:
+ Editor(sk_sp<SkPathRef>* pathRef,
+ int incReserveVerbs = 0,
+ int incReservePoints = 0);
+
+ ~Editor() { SkDEBUGCODE(fPathRef->fEditorsAttached--;) }
+
+ /**
+ * Returns the array of points.
+ */
+ SkPoint* writablePoints() { return fPathRef->getWritablePoints(); }
+ const SkPoint* points() const { return fPathRef->points(); }
+
+ /**
+ * Gets the ith point. Shortcut for this->points() + i
+ */
+ SkPoint* atPoint(int i) { return fPathRef->getWritablePoints() + i; }
+ const SkPoint* atPoint(int i) const { return &fPathRef->fPoints[i]; }
+
+ /**
+ * Adds the verb and allocates space for the number of points indicated by the verb. The
+ * return value is a pointer to where the points for the verb should be written.
+ * 'weight' is only used if 'verb' is kConic_Verb
+ */
+ SkPoint* growForVerb(int /*SkPath::Verb*/ verb, SkScalar weight = 0) {
+ SkDEBUGCODE(fPathRef->validate();)
+ return fPathRef->growForVerb(verb, weight);
+ }
+
+ /**
+ * Allocates space for multiple instances of a particular verb and the
+ * requisite points & weights.
+ * The return pointer points at the first new point (indexed normally [<i>]).
+ * If 'verb' is kConic_Verb, 'weights' will return a pointer to the
+ * space for the conic weights (indexed normally).
+ */
+ SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb,
+ int numVbs,
+ SkScalar** weights = nullptr) {
+ return fPathRef->growForRepeatedVerb(verb, numVbs, weights);
+ }
+
+ /**
+ * Concatenates all verbs from 'path' onto the pathRef's verbs array. Increases the point
+ * count by the number of points in 'path', and the conic weight count by the number of
+ * conics in 'path'.
+ *
+ * Returns pointers to the uninitialized points and conic weights data.
+ */
+ std::tuple<SkPoint*, SkScalar*> growForVerbsInPath(const SkPathRef& path) {
+ return fPathRef->growForVerbsInPath(path);
+ }
+
+ /**
+ * Resets the path ref to a new verb and point count. The new verbs and points are
+ * uninitialized.
+ */
+ void resetToSize(int newVerbCnt, int newPointCnt, int newConicCount) {
+ fPathRef->resetToSize(newVerbCnt, newPointCnt, newConicCount);
+ }
+
+ /**
+ * Gets the path ref that is wrapped in the Editor.
+ */
+ SkPathRef* pathRef() { return fPathRef; }
+
+ void setIsOval(bool isOval, bool isCCW, unsigned start) {
+ fPathRef->setIsOval(isOval, isCCW, start);
+ }
+
+ void setIsRRect(bool isRRect, bool isCCW, unsigned start) {
+ fPathRef->setIsRRect(isRRect, isCCW, start);
+ }
+
+ void setBounds(const SkRect& rect) { fPathRef->setBounds(rect); }
+
+ private:
+ SkPathRef* fPathRef;
+ };
+
+ class SK_API Iter {
+ public:
+ Iter();
+ Iter(const SkPathRef&);
+
+ void setPathRef(const SkPathRef&);
+
+ /** Return the next verb in this iteration of the path. When all
+ segments have been visited, return kDone_Verb.
+
+ If any point in the path is non-finite, return kDone_Verb immediately.
+
+ @param pts The points representing the current verb and/or segment.
+ This must not be NULL.
+ @return The verb for the current segment
+ */
+ uint8_t next(SkPoint pts[4]);
+ uint8_t peek() const;
+
+ SkScalar conicWeight() const { return *fConicWeights; }
+
+ private:
+ const SkPoint* fPts;
+ const uint8_t* fVerbs;
+ const uint8_t* fVerbStop;
+ const SkScalar* fConicWeights;
+ };
+
+public:
+ /**
+ * Gets a path ref with no verbs or points.
+ */
+ static SkPathRef* CreateEmpty();
+
+ /**
+ * Returns true if all of the points in this path are finite, meaning there
+ * are no infinities and no NaNs.
+ */
+ bool isFinite() const {
+ if (fBoundsIsDirty) {
+ this->computeBounds();
+ }
+ return SkToBool(fIsFinite);
+ }
+
+ /**
+ * Returns a mask, where each bit corresponding to a SegmentMask is
+ * set if the path contains 1 or more segments of that type.
+ * Returns 0 for an empty path (no segments).
+ */
+ uint32_t getSegmentMasks() const { return fSegmentMask; }
+
+ /** Returns true if the path is an oval.
+ *
+ * @param rect returns the bounding rect of this oval. It's a circle
+ * if the height and width are the same.
+ * @param isCCW is the oval CCW (or CW if false).
+ * @param start indicates where the contour starts on the oval (see
+ * SkPath::addOval for interpretation of the index).
+ *
+ * @return true if this path is an oval.
+ * Tracking whether a path is an oval is considered an
+ * optimization for performance and so some paths that are in
+ * fact ovals can report false.
+ */
+ bool isOval(SkRect* rect, bool* isCCW, unsigned* start) const {
+ if (fIsOval) {
+ if (rect) {
+ *rect = this->getBounds();
+ }
+ if (isCCW) {
+ *isCCW = SkToBool(fRRectOrOvalIsCCW);
+ }
+ if (start) {
+ *start = fRRectOrOvalStartIdx;
+ }
+ }
+
+ return SkToBool(fIsOval);
+ }
+
+ bool isRRect(SkRRect* rrect, bool* isCCW, unsigned* start) const;
+
+ bool hasComputedBounds() const {
+ return !fBoundsIsDirty;
+ }
+
+ /** Returns the bounds of the path's points. If the path contains 0 or 1
+ points, the bounds is set to (0,0,0,0), and isEmpty() will return true.
+ Note: this bounds may be larger than the actual shape, since curves
+ do not extend as far as their control points.
+ */
+ const SkRect& getBounds() const {
+ if (fBoundsIsDirty) {
+ this->computeBounds();
+ }
+ return fBounds;
+ }
+
+ SkRRect getRRect() const;
+
+ /**
+ * Transforms a path ref by a matrix, allocating a new one only if necessary.
+ */
+ static void CreateTransformedCopy(sk_sp<SkPathRef>* dst,
+ const SkPathRef& src,
+ const SkMatrix& matrix);
+
+ // static SkPathRef* CreateFromBuffer(SkRBuffer* buffer);
+
+ /**
+ * Rolls back a path ref to zero verbs and points with the assumption that the path ref will be
+ * repopulated with approximately the same number of verbs and points. A new path ref is created
+ * only if necessary.
+ */
+ static void Rewind(sk_sp<SkPathRef>* pathRef);
+
+ ~SkPathRef();
+ int countPoints() const { return fPoints.size(); }
+ int countVerbs() const { return fVerbs.size(); }
+ int countWeights() const { return fConicWeights.size(); }
+
+ size_t approximateBytesUsed() const;
+
+ /**
+ * Returns a pointer one beyond the first logical verb (last verb in memory order).
+ */
+ const uint8_t* verbsBegin() const { return fVerbs.begin(); }
+
+ /**
+ * Returns a const pointer to the first verb in memory (which is the last logical verb).
+ */
+ const uint8_t* verbsEnd() const { return fVerbs.end(); }
+
+ /**
+ * Returns a const pointer to the first point.
+ */
+ const SkPoint* points() const { return fPoints.begin(); }
+
+ /**
+ * Shortcut for this->points() + this->countPoints()
+ */
+ const SkPoint* pointsEnd() const { return this->points() + this->countPoints(); }
+
+ const SkScalar* conicWeights() const { return fConicWeights.begin(); }
+ const SkScalar* conicWeightsEnd() const { return fConicWeights.end(); }
+
+ /**
+ * Convenience methods for getting to a verb or point by index.
+ */
+ uint8_t atVerb(int index) const { return fVerbs[index]; }
+ const SkPoint& atPoint(int index) const { return fPoints[index]; }
+
+ bool operator== (const SkPathRef& ref) const;
+
+ void interpolate(const SkPathRef& ending, SkScalar weight, SkPathRef* out) const;
+
+ /**
+ * Gets an ID that uniquely identifies the contents of the path ref. If two path refs have the
+ * same ID then they have the same verbs and points. However, two path refs may have the same
+ * contents but different genIDs.
+ * skbug.com/1762 for background on why fillType is necessary (for now).
+ */
+ uint32_t genID(uint8_t fillType) const;
+
+ void addGenIDChangeListener(sk_sp<SkIDChangeListener>); // Threadsafe.
+ int genIDChangeListenerCount(); // Threadsafe
+
+ bool dataMatchesVerbs() const;
+ bool isValid() const;
+ SkDEBUGCODE(void validate() const { SkASSERT(this->isValid()); } )
+
+ /**
+ * Resets this SkPathRef to a clean state.
+ */
+ void reset();
+
+ bool isInitialEmptyPathRef() const {
+ return fGenerationID == kEmptyGenID;
+ }
+
+private:
+ enum SerializationOffsets {
+ kLegacyRRectOrOvalStartIdx_SerializationShift = 28, // requires 3 bits, ignored.
+ kLegacyRRectOrOvalIsCCW_SerializationShift = 27, // requires 1 bit, ignored.
+ kLegacyIsRRect_SerializationShift = 26, // requires 1 bit, ignored.
+ kIsFinite_SerializationShift = 25, // requires 1 bit
+ kLegacyIsOval_SerializationShift = 24, // requires 1 bit, ignored.
+ kSegmentMask_SerializationShift = 0 // requires 4 bits (deprecated)
+ };
+
+ SkPathRef(int numVerbs = 0, int numPoints = 0) {
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fGenerationID = kEmptyGenID;
+ fSegmentMask = 0;
+ fIsOval = false;
+ fIsRRect = false;
+ // The next two values don't matter unless fIsOval or fIsRRect are true.
+ fRRectOrOvalIsCCW = false;
+ fRRectOrOvalStartIdx = 0xAC;
+ if (numPoints > 0)
+ fPoints.reserve_back(numPoints);
+ if (numVerbs > 0)
+ fVerbs.reserve_back(numVerbs);
+ SkDEBUGCODE(fEditorsAttached.store(0);)
+ SkDEBUGCODE(this->validate();)
+ }
+
+ void copy(const SkPathRef& ref, int additionalReserveVerbs, int additionalReservePoints);
+
+ // Return true if the computed bounds are finite.
+ static bool ComputePtBounds(SkRect* bounds, const SkPathRef& ref) {
+ return bounds->setBoundsCheck(ref.points(), ref.countPoints());
+ }
+
+ // called, if dirty, by getBounds()
+ void computeBounds() const {
+ SkDEBUGCODE(this->validate();)
+ // TODO(mtklein): remove fBoundsIsDirty and fIsFinite,
+ // using an inverted rect instead of fBoundsIsDirty and always recalculating fIsFinite.
+ SkASSERT(fBoundsIsDirty);
+
+ fIsFinite = ComputePtBounds(&fBounds, *this);
+ fBoundsIsDirty = false;
+ }
+
+ void setBounds(const SkRect& rect) {
+ SkASSERT(rect.fLeft <= rect.fRight && rect.fTop <= rect.fBottom);
+ fBounds = rect;
+ fBoundsIsDirty = false;
+ fIsFinite = fBounds.isFinite();
+ }
+
+ /** Makes additional room but does not change the counts or change the genID */
+ void incReserve(int additionalVerbs, int additionalPoints) {
+ SkDEBUGCODE(this->validate();)
+ // Use reserve() so that if there is not enough space, the array will grow with some
+ // additional space. This ensures repeated calls to grow won't always allocate.
+ if (additionalPoints > 0)
+ fPoints.reserve(fPoints.size() + additionalPoints);
+ if (additionalVerbs > 0)
+ fVerbs.reserve(fVerbs.size() + additionalVerbs);
+ SkDEBUGCODE(this->validate();)
+ }
+
+ /**
+ * Resets all state except that of the verbs, points, and conic-weights.
+ * Intended to be called from other functions that reset state.
+ */
+ void commonReset() {
+ SkDEBUGCODE(this->validate();)
+ this->callGenIDChangeListeners();
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fGenerationID = 0;
+
+ fSegmentMask = 0;
+ fIsOval = false;
+ fIsRRect = false;
+ }
+
+ /** Resets the path ref with verbCount verbs and pointCount points, all uninitialized. Also
+ * allocates space for reserveVerbs additional verbs and reservePoints additional points.*/
+ void resetToSize(int verbCount, int pointCount, int conicCount,
+ int reserveVerbs = 0, int reservePoints = 0) {
+ commonReset();
+ // Use reserve_back() so the arrays are sized to exactly fit the data.
+ const int pointDelta = pointCount + reservePoints - fPoints.size();
+ if (pointDelta > 0) {
+ fPoints.reserve_back(pointDelta);
+ }
+ fPoints.resize_back(pointCount);
+ const int verbDelta = verbCount + reserveVerbs - fVerbs.size();
+ if (verbDelta > 0) {
+ fVerbs.reserve_back(verbDelta);
+ }
+ fVerbs.resize_back(verbCount);
+ fConicWeights.resize_back(conicCount);
+ SkDEBUGCODE(this->validate();)
+ }
+
+ /**
+ * Increases the verb count by numVbs and point count by the required amount.
+ * The new points are uninitialized. All the new verbs are set to the specified
+ * verb. If 'verb' is kConic_Verb, 'weights' will return a pointer to the
+ * uninitialized conic weights.
+ */
+ SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb, int numVbs, SkScalar** weights);
+
+ /**
+ * Increases the verb count by 1, records the new verb, and creates room for the requisite number
+ * of additional points. A pointer to the first point is returned. Any new points are
+ * uninitialized.
+ */
+ SkPoint* growForVerb(int /*SkPath::Verb*/ verb, SkScalar weight);
+
+ /**
+ * Concatenates all verbs from 'path' onto our own verbs array. Increases the point count by the
+ * number of points in 'path', and the conic weight count by the number of conics in 'path'.
+ *
+ * Returns pointers to the uninitialized points and conic weights data.
+ */
+ std::tuple<SkPoint*, SkScalar*> growForVerbsInPath(const SkPathRef& path);
+
+ /**
+ * Private, non-const-ptr version of the public function verbsMemBegin().
+ */
+ uint8_t* verbsBeginWritable() { return fVerbs.begin(); }
+
+ /**
+ * Called the first time someone calls CreateEmpty to actually create the singleton.
+ */
+ friend SkPathRef* sk_create_empty_pathref();
+
+ void setIsOval(bool isOval, bool isCCW, unsigned start) {
+ fIsOval = isOval;
+ fRRectOrOvalIsCCW = isCCW;
+ fRRectOrOvalStartIdx = SkToU8(start);
+ }
+
+ void setIsRRect(bool isRRect, bool isCCW, unsigned start) {
+ fIsRRect = isRRect;
+ fRRectOrOvalIsCCW = isCCW;
+ fRRectOrOvalStartIdx = SkToU8(start);
+ }
+
+ // called only by the editor. Note that this is not a const function.
+ SkPoint* getWritablePoints() {
+ SkDEBUGCODE(this->validate();)
+ fIsOval = false;
+ fIsRRect = false;
+ return fPoints.begin();
+ }
+
+ const SkPoint* getPoints() const {
+ SkDEBUGCODE(this->validate();)
+ return fPoints.begin();
+ }
+
+ void callGenIDChangeListeners();
+
+ enum {
+ kMinSize = 256,
+ };
+
+ mutable SkRect fBounds;
+
+ PointsArray fPoints;
+ VerbsArray fVerbs;
+ ConicWeightsArray fConicWeights;
+
+ enum {
+ kEmptyGenID = 1, // GenID reserved for path ref with zero points and zero verbs.
+ };
+ mutable uint32_t fGenerationID;
+ SkDEBUGCODE(std::atomic<int> fEditorsAttached;) // assert only one editor in use at any time.
+
+ SkIDChangeListener::List fGenIDChangeListeners;
+
+ mutable uint8_t fBoundsIsDirty;
+ mutable bool fIsFinite; // only meaningful if bounds are valid
+
+ bool fIsOval;
+ bool fIsRRect;
+ // Both the circle and rrect special cases have a notion of direction and starting point
+ // The next two variables store that information for either.
+ bool fRRectOrOvalIsCCW;
+ uint8_t fRRectOrOvalStartIdx;
+ uint8_t fSegmentMask;
+
+ friend class PathRefTest_Private;
+ friend class ForceIsRRect_Private; // unit test isRRect
+ friend class SkPath;
+ friend class SkPathBuilder;
+ friend class SkPathPriv;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLDefines.h b/gfx/skia/skia/include/private/SkSLDefines.h
new file mode 100644
index 0000000000..a258054229
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLDefines.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DEFINES
+#define SKSL_DEFINES
+
+#include <cstdint>
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+
+using SKSL_INT = int64_t;
+using SKSL_FLOAT = float;
+
+namespace SkSL {
+
+class Expression;
+class Statement;
+
+using ComponentArray = SkSTArray<4, int8_t>; // for Swizzles
+
+class ExpressionArray : public SkSTArray<2, std::unique_ptr<Expression>> {
+public:
+ using SkSTArray::SkSTArray;
+
+ /** Returns a new ExpressionArray containing a clone of every element. */
+ ExpressionArray clone() const;
+};
+
+using StatementArray = SkSTArray<2, std::unique_ptr<Statement>>;
+
+// Functions larger than this (measured in IR nodes) will not be inlined. This growth factor
+// accounts for the number of calls being inlined--i.e., a function called five times (that is, with
+// five inlining opportunities) would be considered 5x larger than if it were called once. This
+// default threshold value is arbitrary, but tends to work well in practice.
+static constexpr int kDefaultInlineThreshold = 50;
+
+// A hard upper limit on the number of variable slots allowed in a function/global scope.
+// This is an arbitrary limit, but is needed to prevent code generation from taking unbounded
+// amounts of time or space.
+static constexpr int kVariableSlotLimit = 100000;
+
+// The SwizzleComponent namespace is used both by the SkSL::Swizzle expression, and the DSL swizzle.
+// This namespace is injected into SkSL::dsl so that `using namespace SkSL::dsl` enables DSL code
+// like `Swizzle(var, X, Y, ONE)` to compile without any extra qualifications.
+namespace SwizzleComponent {
+
+enum Type : int8_t {
+ X = 0, Y = 1, Z = 2, W = 3,
+ R = 4, G = 5, B = 6, A = 7,
+ S = 8, T = 9, P = 10, Q = 11,
+ UL = 12, UT = 13, UR = 14, UB = 15,
+ ZERO,
+ ONE
+};
+
+} // namespace SwizzleComponent
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLIRNode.h b/gfx/skia/skia/include/private/SkSLIRNode.h
new file mode 100644
index 0000000000..8fb4279b76
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLIRNode.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_IRNODE
+#define SKSL_IRNODE
+
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLPool.h"
+
+#include <string>
+
+namespace SkSL {
+
+// The fKind field of IRNode could contain any of these values.
+enum class ProgramElementKind {
+ kExtension = 0,
+ kFunction,
+ kFunctionPrototype,
+ kGlobalVar,
+ kInterfaceBlock,
+ kModifiers,
+ kStructDefinition,
+
+ kFirst = kExtension,
+ kLast = kStructDefinition
+};
+
+enum class SymbolKind {
+ kExternal = (int) ProgramElementKind::kLast + 1,
+ kField,
+ kFunctionDeclaration,
+ kType,
+ kVariable,
+
+ kFirst = kExternal,
+ kLast = kVariable
+};
+
+enum class StatementKind {
+ kBlock = (int) SymbolKind::kLast + 1,
+ kBreak,
+ kContinue,
+ kDiscard,
+ kDo,
+ kExpression,
+ kFor,
+ kIf,
+ kNop,
+ kReturn,
+ kSwitch,
+ kSwitchCase,
+ kVarDeclaration,
+
+ kFirst = kBlock,
+ kLast = kVarDeclaration,
+};
+
+enum class ExpressionKind {
+ kBinary = (int) StatementKind::kLast + 1,
+ kChildCall,
+ kConstructorArray,
+ kConstructorArrayCast,
+ kConstructorCompound,
+ kConstructorCompoundCast,
+ kConstructorDiagonalMatrix,
+ kConstructorMatrixResize,
+ kConstructorScalarCast,
+ kConstructorSplat,
+ kConstructorStruct,
+ kFieldAccess,
+ kFunctionReference,
+ kFunctionCall,
+ kIndex,
+ kLiteral,
+ kMethodReference,
+ kPoison,
+ kPostfix,
+ kPrefix,
+ kSetting,
+ kSwizzle,
+ kTernary,
+ kTypeReference,
+ kVariableReference,
+
+ kFirst = kBinary,
+ kLast = kVariableReference
+};
+
+/**
+ * Represents a node in the intermediate representation (IR) tree. The IR is a fully-resolved
+ * version of the program (all types determined, everything validated), ready for code generation.
+ */
+class IRNode : public Poolable {
+public:
+ virtual ~IRNode() {}
+
+ virtual std::string description() const = 0;
+
+ // No copy construction or assignment
+ IRNode(const IRNode&) = delete;
+ IRNode& operator=(const IRNode&) = delete;
+
+ // position of this element within the program being compiled, for error reporting purposes
+ Position fPosition;
+
+ /**
+ * Use is<T> to check the type of an IRNode.
+ * e.g. replace `s.kind() == Statement::Kind::kReturn` with `s.is<ReturnStatement>()`.
+ */
+ template <typename T>
+ bool is() const {
+ return this->fKind == (int)T::kIRNodeKind;
+ }
+
+ /**
+ * Use as<T> to downcast IRNodes.
+ * e.g. replace `(ReturnStatement&) s` with `s.as<ReturnStatement>()`.
+ */
+ template <typename T>
+ const T& as() const {
+ SkASSERT(this->is<T>());
+ return static_cast<const T&>(*this);
+ }
+
+ template <typename T>
+ T& as() {
+ SkASSERT(this->is<T>());
+ return static_cast<T&>(*this);
+ }
+
+protected:
+ IRNode(Position position, int kind)
+ : fPosition(position)
+ , fKind(kind) {}
+
+ int fKind;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLLayout.h b/gfx/skia/skia/include/private/SkSLLayout.h
new file mode 100644
index 0000000000..a99f18a477
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLLayout.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_LAYOUT
+#define SKSL_LAYOUT
+
+#include <string>
+
+namespace SkSL {
+
+/**
+ * Represents a layout block appearing before a variable declaration, as in:
+ *
+ * layout (location = 0) int x;
+ */
+struct Layout {
+ enum Flag {
+ kOriginUpperLeft_Flag = 1 << 0,
+ kPushConstant_Flag = 1 << 1,
+ kBlendSupportAllEquations_Flag = 1 << 2,
+ kColor_Flag = 1 << 3,
+
+ // These flags indicate if the qualifier appeared, regardless of the accompanying value.
+ kLocation_Flag = 1 << 4,
+ kOffset_Flag = 1 << 5,
+ kBinding_Flag = 1 << 6,
+ kTexture_Flag = 1 << 7,
+ kSampler_Flag = 1 << 8,
+ kIndex_Flag = 1 << 9,
+ kSet_Flag = 1 << 10,
+ kBuiltin_Flag = 1 << 11,
+ kInputAttachmentIndex_Flag = 1 << 12,
+
+ // These flags indicate the backend type; at most one can be set.
+ kSPIRV_Flag = 1 << 13,
+ kMetal_Flag = 1 << 14,
+ kGL_Flag = 1 << 15,
+ kWGSL_Flag = 1 << 16,
+ };
+
+ static constexpr int kAllBackendFlagsMask =
+ Layout::kSPIRV_Flag | Layout::kMetal_Flag | Layout::kGL_Flag | Layout::kWGSL_Flag;
+
+ Layout(int flags, int location, int offset, int binding, int index, int set, int builtin,
+ int inputAttachmentIndex)
+ : fFlags(flags)
+ , fLocation(location)
+ , fOffset(offset)
+ , fBinding(binding)
+ , fIndex(index)
+ , fSet(set)
+ , fBuiltin(builtin)
+ , fInputAttachmentIndex(inputAttachmentIndex) {}
+
+ Layout() = default;
+
+ static Layout builtin(int builtin) {
+ Layout result;
+ result.fBuiltin = builtin;
+ return result;
+ }
+
+ std::string description() const;
+
+ bool operator==(const Layout& other) const;
+
+ bool operator!=(const Layout& other) const {
+ return !(*this == other);
+ }
+
+ int fFlags = 0;
+ int fLocation = -1;
+ int fOffset = -1;
+ int fBinding = -1;
+ int fTexture = -1;
+ int fSampler = -1;
+ int fIndex = -1;
+ int fSet = -1;
+ // builtin comes from SPIR-V and identifies which particular builtin value this object
+ // represents.
+ int fBuiltin = -1;
+ // input_attachment_index comes from Vulkan/SPIR-V to connect a shader variable to the
+ // corresponding attachment on the subpass in which the shader is being used.
+ int fInputAttachmentIndex = -1;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLModifiers.h b/gfx/skia/skia/include/private/SkSLModifiers.h
new file mode 100644
index 0000000000..7e8efddf19
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLModifiers.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_MODIFIERS
+#define SKSL_MODIFIERS
+
+#include "include/private/SkSLLayout.h"
+
+#include <cstddef>
+#include <memory>
+#include <string>
+
+namespace SkSL {
+
+class Context;
+class Position;
+
+/**
+ * A set of modifier keywords (in, out, uniform, etc.) appearing before a declaration.
+ */
+struct Modifiers {
+ /**
+ * OpenGL requires modifiers to be in a strict order:
+ * - invariant-qualifier: (invariant)
+ * - interpolation-qualifier: flat, noperspective, (smooth)
+ * - storage-qualifier: const, uniform
+ * - parameter-qualifier: in, out, inout
+ * - precision-qualifier: highp, mediump, lowp
+ *
+ * SkSL does not have `invariant` or `smooth`.
+ */
+
+ enum Flag {
+ kNo_Flag = 0,
+ // Real GLSL modifiers
+ kFlat_Flag = 1 << 0,
+ kNoPerspective_Flag = 1 << 1,
+ kConst_Flag = 1 << 2,
+ kUniform_Flag = 1 << 3,
+ kIn_Flag = 1 << 4,
+ kOut_Flag = 1 << 5,
+ kHighp_Flag = 1 << 6,
+ kMediump_Flag = 1 << 7,
+ kLowp_Flag = 1 << 8,
+ kReadOnly_Flag = 1 << 9,
+ kWriteOnly_Flag = 1 << 10,
+ kBuffer_Flag = 1 << 11,
+ // Corresponds to the GLSL 'shared' modifier. Only allowed in a compute program.
+ kWorkgroup_Flag = 1 << 12,
+ // SkSL extensions, not present in GLSL
+ kExport_Flag = 1 << 13,
+ kES3_Flag = 1 << 14,
+ kPure_Flag = 1 << 15,
+ kInline_Flag = 1 << 16,
+ kNoInline_Flag = 1 << 17,
+ };
+
+ Modifiers()
+ : fLayout(Layout())
+ , fFlags(0) {}
+
+ Modifiers(const Layout& layout, int flags)
+ : fLayout(layout)
+ , fFlags(flags) {}
+
+ std::string description() const {
+ return fLayout.description() + DescribeFlags(fFlags) + " ";
+ }
+
+ static std::string DescribeFlags(int flags) {
+ // SkSL extensions
+ std::string result;
+ if (flags & kExport_Flag) {
+ result += "$export ";
+ }
+ if (flags & kES3_Flag) {
+ result += "$es3 ";
+ }
+ if (flags & kPure_Flag) {
+ result += "$pure ";
+ }
+ if (flags & kInline_Flag) {
+ result += "inline ";
+ }
+ if (flags & kNoInline_Flag) {
+ result += "noinline ";
+ }
+
+ // Real GLSL qualifiers (must be specified in order in GLSL 4.1 and below)
+ if (flags & kFlat_Flag) {
+ result += "flat ";
+ }
+ if (flags & kNoPerspective_Flag) {
+ result += "noperspective ";
+ }
+ if (flags & kConst_Flag) {
+ result += "const ";
+ }
+ if (flags & kUniform_Flag) {
+ result += "uniform ";
+ }
+ if ((flags & kIn_Flag) && (flags & kOut_Flag)) {
+ result += "inout ";
+ } else if (flags & kIn_Flag) {
+ result += "in ";
+ } else if (flags & kOut_Flag) {
+ result += "out ";
+ }
+ if (flags & kHighp_Flag) {
+ result += "highp ";
+ }
+ if (flags & kMediump_Flag) {
+ result += "mediump ";
+ }
+ if (flags & kLowp_Flag) {
+ result += "lowp ";
+ }
+ if (flags & kReadOnly_Flag) {
+ result += "readonly ";
+ }
+ if (flags & kWriteOnly_Flag) {
+ result += "writeonly ";
+ }
+ if (flags & kBuffer_Flag) {
+ result += "buffer ";
+ }
+
+ // We're using a non-GLSL name for this one; the GLSL equivalent is "shared"
+ if (flags & kWorkgroup_Flag) {
+ result += "workgroup ";
+ }
+
+ if (!result.empty()) {
+ result.pop_back();
+ }
+ return result;
+ }
+
+ bool operator==(const Modifiers& other) const {
+ return fLayout == other.fLayout && fFlags == other.fFlags;
+ }
+
+ bool operator!=(const Modifiers& other) const {
+ return !(*this == other);
+ }
+
+ /**
+ * Verifies that only permitted modifiers and layout flags are included. Reports errors and
+ * returns false in the event of a violation.
+ */
+ bool checkPermitted(const Context& context,
+ Position pos,
+ int permittedModifierFlags,
+ int permittedLayoutFlags) const;
+
+ Layout fLayout;
+ int fFlags;
+};
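+
+// A minimal sketch of DescribeFlags output (hypothetical usage; the result string
+// follows from the flag handling above):
+//
+//     int flags = SkSL::Modifiers::kUniform_Flag | SkSL::Modifiers::kHighp_Flag;
+//     std::string s = SkSL::Modifiers::DescribeFlags(flags);  // "uniform highp"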
+
+} // namespace SkSL
+
+namespace std {
+
+template <>
+struct hash<SkSL::Modifiers> {
+ size_t operator()(const SkSL::Modifiers& key) const {
+ return (size_t) key.fFlags ^ ((size_t) key.fLayout.fFlags << 8) ^
+ ((size_t) key.fLayout.fBuiltin << 16);
+ }
+};
+
+} // namespace std
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLProgramElement.h b/gfx/skia/skia/include/private/SkSLProgramElement.h
new file mode 100644
index 0000000000..34d57bcdf8
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLProgramElement.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PROGRAMELEMENT
+#define SKSL_PROGRAMELEMENT
+
+#include "include/private/SkSLIRNode.h"
+
+#include <memory>
+
+namespace SkSL {
+
+/**
+ * Represents a top-level element (e.g. function or global variable) in a program.
+ */
+class ProgramElement : public IRNode {
+public:
+ using Kind = ProgramElementKind;
+
+ ProgramElement(Position pos, Kind kind)
+ : INHERITED(pos, (int) kind) {
+ SkASSERT(kind >= Kind::kFirst && kind <= Kind::kLast);
+ }
+
+ Kind kind() const {
+ return (Kind) fKind;
+ }
+
+ virtual std::unique_ptr<ProgramElement> clone() const = 0;
+
+private:
+ using INHERITED = IRNode;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLProgramKind.h b/gfx/skia/skia/include/private/SkSLProgramKind.h
new file mode 100644
index 0000000000..f2355bd7d8
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLProgramKind.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLProgramKind_DEFINED
+#define SkSLProgramKind_DEFINED
+
+#include <cinttypes>
+
+namespace SkSL {
+
+/**
+ * SkSL supports several different program kinds.
+ */
+enum class ProgramKind : int8_t {
+ kFragment,
+ kVertex,
+ kCompute,
+ kGraphiteFragment,
+ kGraphiteVertex,
+ kRuntimeColorFilter, // Runtime effect only suitable as SkColorFilter
+ kRuntimeShader, // " " " " " SkShader
+ kRuntimeBlender, // " " " " " SkBlender
+ kPrivateRuntimeColorFilter, // Runtime color filter with public restrictions lifted
+ kPrivateRuntimeShader, // Runtime shader " " " "
+ kPrivateRuntimeBlender, // Runtime blender " " " "
+ kMeshVertex, // Vertex portion of a custom mesh
+ kMeshFragment, // Fragment " " " " "
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLSampleUsage.h b/gfx/skia/skia/include/private/SkSLSampleUsage.h
new file mode 100644
index 0000000000..39d9e25818
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLSampleUsage.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLSampleUsage_DEFINED
+#define SkSLSampleUsage_DEFINED
+
+#include "include/core/SkTypes.h"
+
+namespace SkSL {
+
+/**
+ * Represents all of the ways that a fragment processor is sampled by its parent.
+ */
+class SampleUsage {
+public:
+ enum class Kind {
+ // Child is never sampled
+ kNone,
+ // Child is only sampled at the same coordinates as the parent
+ kPassThrough,
+ // Child is sampled with a matrix whose value is uniform
+ kUniformMatrix,
+ // Child is sampled with sk_FragCoord.xy
+ kFragCoord,
+ // Child is sampled using explicit coordinates
+ kExplicit,
+ };
+
+ // Make a SampleUsage that corresponds to no sampling of the child at all
+ SampleUsage() = default;
+
+ SampleUsage(Kind kind, bool hasPerspective) : fKind(kind), fHasPerspective(hasPerspective) {
+ if (kind != Kind::kUniformMatrix) {
+ SkASSERT(!fHasPerspective);
+ }
+ }
+
+ // Child is sampled with a matrix whose value is uniform. The name is fixed.
+ static SampleUsage UniformMatrix(bool hasPerspective) {
+ return SampleUsage(Kind::kUniformMatrix, hasPerspective);
+ }
+
+ static SampleUsage Explicit() {
+ return SampleUsage(Kind::kExplicit, false);
+ }
+
+ static SampleUsage PassThrough() {
+ return SampleUsage(Kind::kPassThrough, false);
+ }
+
+ static SampleUsage FragCoord() { return SampleUsage(Kind::kFragCoord, false); }
+
+ bool operator==(const SampleUsage& that) const {
+ return fKind == that.fKind && fHasPerspective == that.fHasPerspective;
+ }
+
+ bool operator!=(const SampleUsage& that) const { return !(*this == that); }
+
+ // Arbitrary name used by all uniform sampling matrices
+ static const char* MatrixUniformName() { return "matrix"; }
+
+ SampleUsage merge(const SampleUsage& other);
+
+ Kind kind() const { return fKind; }
+
+ bool hasPerspective() const { return fHasPerspective; }
+
+ bool isSampled() const { return fKind != Kind::kNone; }
+ bool isPassThrough() const { return fKind == Kind::kPassThrough; }
+ bool isExplicit() const { return fKind == Kind::kExplicit; }
+ bool isUniformMatrix() const { return fKind == Kind::kUniformMatrix; }
+ bool isFragCoord() const { return fKind == Kind::kFragCoord; }
+
+private:
+ Kind fKind = Kind::kNone;
+ bool fHasPerspective = false; // Only valid if fKind is kUniformMatrix
+};
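+
+// Construction sketch (hypothetical usage):
+//
+//     SkSL::SampleUsage u = SkSL::SampleUsage::UniformMatrix(/*hasPerspective=*/false);
+//     SkASSERT(u.isUniformMatrix() && !u.hasPerspective());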
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLStatement.h b/gfx/skia/skia/include/private/SkSLStatement.h
new file mode 100644
index 0000000000..3e5f084c75
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLStatement.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_STATEMENT
+#define SKSL_STATEMENT
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLSymbol.h"
+
+namespace SkSL {
+
+/**
+ * Abstract supertype of all statements.
+ */
+class Statement : public IRNode {
+public:
+ using Kind = StatementKind;
+
+ Statement(Position pos, Kind kind)
+ : INHERITED(pos, (int) kind) {
+ SkASSERT(kind >= Kind::kFirst && kind <= Kind::kLast);
+ }
+
+ Kind kind() const {
+ return (Kind) fKind;
+ }
+
+ virtual bool isEmpty() const {
+ return false;
+ }
+
+ virtual std::unique_ptr<Statement> clone() const = 0;
+
+private:
+ using INHERITED = IRNode;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLString.h b/gfx/skia/skia/include/private/SkSLString.h
new file mode 100644
index 0000000000..f8f3768ca8
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLString.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_STRING
+#define SKSL_STRING
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+
+#include <stdarg.h>
+#include <string>
+#include <string_view>
+
+namespace SkSL {
+
+bool stod(std::string_view s, SKSL_FLOAT* value);
+bool stoi(std::string_view s, SKSL_INT* value);
+
+namespace String {
+
+std::string printf(const char* fmt, ...) SK_PRINTF_LIKE(1, 2);
+void appendf(std::string* str, const char* fmt, ...) SK_PRINTF_LIKE(2, 3);
+void vappendf(std::string* str, const char* fmt, va_list va) SK_PRINTF_LIKE(2, 0);
+
+inline auto Separator() {
+ // This returns a lambda which emits "" the first time it is called, and ", " every subsequent
+ // time it is called.
+ struct Output {
+ const std::string fSpace, fComma;
+ };
+ static const Output* kOutput = new Output{{}, {", "}};
+
+ return [firstSeparator = true]() mutable -> const std::string& {
+ if (firstSeparator) {
+ firstSeparator = false;
+ return kOutput->fSpace;
+ } else {
+ return kOutput->fComma;
+ }
+ };
+}
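+
+// A minimal usage sketch (hypothetical code): Separator() makes comma-joined
+// lists straightforward to build in a loop.
+//
+//     std::string args = "(";
+//     auto separator = SkSL::String::Separator();
+//     for (const char* name : {"x", "y", "z"}) {
+//         args += separator();   // "" on the first call, ", " afterwards
+//         args += name;
+//     }
+//     args += ")";               // "(x, y, z)"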
+
+} // namespace String
+} // namespace SkSL
+
+namespace skstd {
+
+// We use a custom to_string(float|double) which ignores locale settings and writes `1.0` instead
+// of `1.00000`.
+std::string to_string(float value);
+std::string to_string(double value);
+
+} // namespace skstd
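+
+// Behavior sketch (hypothetical values): skstd::to_string(1.0f) yields "1.0",
+// where std::to_string(1.0f) would yield "1.000000" and honor the C locale.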
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLSymbol.h b/gfx/skia/skia/include/private/SkSLSymbol.h
new file mode 100644
index 0000000000..a5b563c5c7
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLSymbol.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SYMBOL
+#define SKSL_SYMBOL
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLProgramElement.h"
+
+namespace SkSL {
+
+class Type;
+
+/**
+ * Represents a symbol table entry.
+ */
+class Symbol : public IRNode {
+public:
+ using Kind = SymbolKind;
+
+ Symbol(Position pos, Kind kind, std::string_view name, const Type* type = nullptr)
+ : INHERITED(pos, (int) kind)
+ , fName(name)
+ , fType(type) {
+ SkASSERT(kind >= Kind::kFirst && kind <= Kind::kLast);
+ }
+
+ ~Symbol() override {}
+
+ const Type& type() const {
+ SkASSERT(fType);
+ return *fType;
+ }
+
+ Kind kind() const {
+ return (Kind) fKind;
+ }
+
+ std::string_view name() const {
+ return fName;
+ }
+
+ /**
+ * Don't call this directly--use SymbolTable::renameSymbol instead!
+ */
+ void setName(std::string_view newName) {
+ fName = newName;
+ }
+
+private:
+ std::string_view fName;
+ const Type* fType;
+
+ using INHERITED = IRNode;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkShadowFlags.h b/gfx/skia/skia/include/private/SkShadowFlags.h
new file mode 100644
index 0000000000..99ed6cb8a0
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkShadowFlags.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShadowFlags_DEFINED
+#define SkShadowFlags_DEFINED
+
+// A set of flags shared between the SkAmbientShadowMaskFilter and the SkSpotShadowMaskFilter
+enum SkShadowFlags {
+ kNone_ShadowFlag = 0x00,
+ /** The occluding object is not opaque. Knowing that the occluder is opaque allows
+ * us to cull shadow geometry behind it and improve performance. */
+ kTransparentOccluder_ShadowFlag = 0x01,
+ /** Don't try to use analytic shadows. */
+ kGeometricOnly_ShadowFlag = 0x02,
+ /** Light position represents a direction, light radius is blur radius at elevation 1 */
+ kDirectionalLight_ShadowFlag = 0x04,
+ /** Concave paths will only use blur to generate the shadow */
+ kConcaveBlurOnly_ShadowFlag = 0x08,
+ /** mask for all shadow flags */
+ kAll_ShadowFlag = 0x0F
+};
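+
+// Combination sketch (hypothetical usage): flags are bitwise-ORed together.
+//
+//     uint32_t flags = kTransparentOccluder_ShadowFlag | kGeometricOnly_ShadowFlag;
+//     SkASSERT((flags & ~kAll_ShadowFlag) == 0);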
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSpinlock.h b/gfx/skia/skia/include/private/SkSpinlock.h
new file mode 100644
index 0000000000..3816dc9dff
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSpinlock.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpinlock_DEFINED
+#define SkSpinlock_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkThreadAnnotations.h"
+#include <atomic>
+
+class SK_CAPABILITY("mutex") SkSpinlock {
+public:
+ constexpr SkSpinlock() = default;
+
+ void acquire() SK_ACQUIRE() {
+ // To act as a mutex, we need an acquire barrier when we acquire the lock.
+ if (fLocked.exchange(true, std::memory_order_acquire)) {
+ // Lock was contended. Fall back to an out-of-line spin loop.
+ this->contendedAcquire();
+ }
+ }
+
+ // Acquire the lock or fail (quickly). Lets the caller decide to do something other than wait.
+ bool tryAcquire() SK_TRY_ACQUIRE(true) {
+ // To act as a mutex, we need an acquire barrier when we acquire the lock.
+ if (fLocked.exchange(true, std::memory_order_acquire)) {
+ // Lock was contended. Let the caller decide what to do.
+ return false;
+ }
+ return true;
+ }
+
+ void release() SK_RELEASE_CAPABILITY() {
+ // To act as a mutex, we need a release barrier when we release the lock.
+ fLocked.store(false, std::memory_order_release);
+ }
+
+private:
+ SK_API void contendedAcquire();
+
+ std::atomic<bool> fLocked{false};
+};
+
+class SK_SCOPED_CAPABILITY SkAutoSpinlock {
+public:
+ SkAutoSpinlock(SkSpinlock& mutex) SK_ACQUIRE(mutex) : fSpinlock(mutex) { fSpinlock.acquire(); }
+ ~SkAutoSpinlock() SK_RELEASE_CAPABILITY() { fSpinlock.release(); }
+
+private:
+ SkSpinlock& fSpinlock;
+};
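+
+// A minimal usage sketch (hypothetical code): protect a shared counter.
+//
+//     static SkSpinlock gLock;
+//     static int gCounter = 0;
+//
+//     void increment() {
+//         SkAutoSpinlock scoped(gLock);  // acquired here, released at scope exit
+//         gCounter++;
+//     }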
+
+#endif // SkSpinlock_DEFINED
diff --git a/gfx/skia/skia/include/private/SkWeakRefCnt.h b/gfx/skia/skia/include/private/SkWeakRefCnt.h
new file mode 100644
index 0000000000..058a18652b
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkWeakRefCnt.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWeakRefCnt_DEFINED
+#define SkWeakRefCnt_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+#include <atomic>
+#include <cstdint>
+
+/** \class SkWeakRefCnt
+
+ SkWeakRefCnt is the base class for objects that may be shared by multiple
+ objects. When an existing strong owner wants to share a reference, it calls
+ ref(). When a strong owner wants to release its reference, it calls
+ unref(). When the shared object's strong reference count goes to zero as
+ the result of an unref() call, its (virtual) weak_dispose method is called.
+ It is an error for the destructor to be called explicitly (or via the
+ object going out of scope on the stack or calling delete) if
+ getRefCnt() > 1.
+
+ In addition to strong ownership, an owner may instead obtain a weak
+ reference by calling weak_ref(). A call to weak_ref() must be balanced by a
+ call to weak_unref(). To obtain a strong reference from a weak reference,
+ call try_ref(). If try_ref() returns true, the owner's pointer is now also
+ a strong reference on which unref() must be called. Note that this does not
+ affect the original weak reference, weak_unref() must still be called. When
+ the weak reference count goes to zero, the object is deleted. While the
+ weak reference count is positive and the strong reference count is zero the
+ object still exists, but will be in the disposed state. It is up to the
+ object to define what this means.
+
+ Note that a strong reference implicitly implies a weak reference. As a
+ result, it is allowable for the owner of a strong ref to call try_ref().
+ This will have the same effect as calling ref(), but may be more expensive.
+
+ Example:
+
+ SkWeakRefCnt myRef = strongRef.weak_ref();
+ ... // strongRef.unref() may or may not be called
+ if (myRef.try_ref()) {
+ ... // use myRef
+ myRef.unref();
+ } else {
+ // myRef is in the disposed state
+ }
+ myRef.weak_unref();
+*/
+class SK_API SkWeakRefCnt : public SkRefCnt {
+public:
+ /** Default construct, initializing the reference counts to 1.
+ The strong references collectively hold one weak reference. When the
+ strong reference count goes to zero, the collectively held weak
+ reference is released.
+ */
+ SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {}
+
+ /** Destruct, asserting that the weak reference count is 1.
+ */
+ ~SkWeakRefCnt() override {
+#ifdef SK_DEBUG
+ SkASSERT(getWeakCnt() == 1);
+ fWeakCnt.store(0, std::memory_order_relaxed);
+#endif
+ }
+
+#ifdef SK_DEBUG
+ /** Return the weak reference count. */
+ int32_t getWeakCnt() const {
+ return fWeakCnt.load(std::memory_order_relaxed);
+ }
+#endif
+
+private:
+ /** If fRefCnt is 0, returns 0.
+ * Otherwise increments fRefCnt, acquires, and returns the old value.
+ */
+ int32_t atomic_conditional_acquire_strong_ref() const {
+ int32_t prev = fRefCnt.load(std::memory_order_relaxed);
+ do {
+ if (0 == prev) {
+ break;
+ }
+ } while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire,
+ std::memory_order_relaxed));
+ return prev;
+ }
+
+public:
+ /** Creates a strong reference from a weak reference, if possible. The
+ caller must already be an owner. If try_ref() returns true the owner
+ is in possession of an additional strong reference. Both the original
+ reference and new reference must be properly unreferenced. If try_ref()
+ returns false, no strong reference could be created and the owner's
+ reference is in the same state as before the call.
+ */
+ bool SK_WARN_UNUSED_RESULT try_ref() const {
+ if (atomic_conditional_acquire_strong_ref() != 0) {
+ // Acquire barrier (L/SL), if not provided above.
+ // Prevents subsequent code from happening before the increment.
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the weak reference count. Must be balanced by a call to
+ weak_unref().
+ */
+ void weak_ref() const {
+ SkASSERT(getRefCnt() > 0);
+ SkASSERT(getWeakCnt() > 0);
+ // No barrier required.
+ (void)fWeakCnt.fetch_add(+1, std::memory_order_relaxed);
+ }
+
+ /** Decrement the weak reference count. If the weak reference count is 1
+ before the decrement, then call delete on the object. Note that if this
+ is the case, then the object needs to have been allocated via new, and
+ not on the stack.
+ */
+ void weak_unref() const {
+ SkASSERT(getWeakCnt() > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == fWeakCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // Like try_ref(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+#ifdef SK_DEBUG
+ // so our destructor won't complain
+ fWeakCnt.store(1, std::memory_order_relaxed);
+#endif
+ this->INHERITED::internal_dispose();
+ }
+ }
+
+ /** Returns true if there are no strong references to the object. When this
+ is the case all future calls to try_ref() will return false.
+ */
+ bool weak_expired() const {
+ return fRefCnt.load(std::memory_order_relaxed) == 0;
+ }
+
+protected:
+ /** Called when the strong reference count goes to zero. This allows the
+ object to free any resources it may be holding. Weak references may
+ still exist and their level of allowed access to the object is defined
+ by the object's class.
+ */
+ virtual void weak_dispose() const {
+ }
+
+private:
+ /** Called when the strong reference count goes to zero. Calls weak_dispose
+ on the object and releases the implicit weak reference held
+ collectively by the strong references.
+ */
+ void internal_dispose() const override {
+ weak_dispose();
+ weak_unref();
+ }
+
+ /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
+ mutable std::atomic<int32_t> fWeakCnt;
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/README.md b/gfx/skia/skia/include/private/base/README.md
new file mode 100644
index 0000000000..7f4f17b228
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/README.md
@@ -0,0 +1,4 @@
+Files in "base" are used by many parts of Skia, but are not part of the public Skia API.
+See also src/base for other files that are part of base, but not needed by the public API.
+
+Files here should not depend on anything other than system headers or other files in base.
\ No newline at end of file
diff --git a/gfx/skia/skia/include/private/base/SingleOwner.h b/gfx/skia/skia/include/private/base/SingleOwner.h
new file mode 100644
index 0000000000..473981e1fb
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SingleOwner.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_SingleOwner_DEFINED
+#define skgpu_SingleOwner_DEFINED
+
+#include "include/private/base/SkDebug.h" // IWYU pragma: keep
+
+#if defined(SK_DEBUG)
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkThreadAnnotations.h"
+#include "include/private/base/SkThreadID.h"
+
+#endif
+
+namespace skgpu {
+
+#if defined(SK_DEBUG)
+
+#define SKGPU_ASSERT_SINGLE_OWNER(obj) \
+ skgpu::SingleOwner::AutoEnforce debug_SingleOwner(obj, __FILE__, __LINE__);
+
+// This is a debug tool to verify an object is only being used from one thread at a time.
+class SingleOwner {
+public:
+ SingleOwner() : fOwner(kIllegalThreadID), fReentranceCount(0) {}
+
+ struct AutoEnforce {
+ AutoEnforce(SingleOwner* so, const char* file, int line)
+ : fFile(file), fLine(line), fSO(so) {
+ fSO->enter(file, line);
+ }
+ ~AutoEnforce() { fSO->exit(fFile, fLine); }
+
+ const char* fFile;
+ int fLine;
+ SingleOwner* fSO;
+ };
+
+private:
+ void enter(const char* file, int line) {
+ SkAutoMutexExclusive lock(fMutex);
+ SkThreadID self = SkGetThreadID();
+ SkASSERTF(fOwner == self || fOwner == kIllegalThreadID, "%s:%d Single owner failure.",
+ file, line);
+ fReentranceCount++;
+ fOwner = self;
+ }
+
+ void exit(const char* file, int line) {
+ SkAutoMutexExclusive lock(fMutex);
+ SkASSERTF(fOwner == SkGetThreadID(), "%s:%d Single owner failure.", file, line);
+ fReentranceCount--;
+ if (fReentranceCount == 0) {
+ fOwner = kIllegalThreadID;
+ }
+ }
+
+ SkMutex fMutex;
+ SkThreadID fOwner SK_GUARDED_BY(fMutex);
+ int fReentranceCount SK_GUARDED_BY(fMutex);
+};
+#else
+#define SKGPU_ASSERT_SINGLE_OWNER(obj)
+class SingleOwner {}; // Provide a no-op implementation so we can pass pointers to constructors
+#endif
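+
+// Usage sketch (hypothetical class): assert single-threaded use of an object.
+//
+//     class Resource {
+//     public:
+//         void mutate() {
+//             SKGPU_ASSERT_SINGLE_OWNER(&fSingleOwner)
+//             // ... asserts in debug builds if two threads are in here at once
+//         }
+//     private:
+//         skgpu::SingleOwner fSingleOwner;
+//     };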
+
+} // namespace skgpu
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkAPI.h b/gfx/skia/skia/include/private/base/SkAPI.h
new file mode 100644
index 0000000000..4028f95d87
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkAPI.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAPI_DEFINED
+#define SkAPI_DEFINED
+
+#include "include/private/base/SkLoadUserConfig.h" // IWYU pragma: keep
+
+// If SKIA_IMPLEMENTATION is defined as 1, that signals we are building Skia and should
+// export our symbols. If it is not set (or set to 0), then Skia is being used by a client
+// and we should not export our symbols.
+#if !defined(SKIA_IMPLEMENTATION)
+ #define SKIA_IMPLEMENTATION 0
+#endif
+
+// If Skia is being compiled as a DLL, we need to be sure to export all of our public
+// APIs to that DLL. If a client is using Skia which was compiled as a DLL, we need to instruct
+// the linker to use the symbols from that DLL. This is the goal of the SK_API define.
+#if !defined(SK_API)
+ #if defined(SKIA_DLL)
+ #if defined(_MSC_VER)
+ #if SKIA_IMPLEMENTATION
+ #define SK_API __declspec(dllexport)
+ #else
+ #define SK_API __declspec(dllimport)
+ #endif
+ #else
+ #define SK_API __attribute__((visibility("default")))
+ #endif
+ #else
+ #define SK_API
+ #endif
+#endif
+
+// SK_SPI is functionally identical to SK_API, but used within src to clarify that it's less stable
+#if !defined(SK_SPI)
+ #define SK_SPI SK_API
+#endif
+
+// See https://clang.llvm.org/docs/AttributeReference.html#availability
+// The API_AVAILABLE macro comes from <os/availability.h> on macOS
+#if defined(SK_ENABLE_API_AVAILABLE)
+# define SK_API_AVAILABLE API_AVAILABLE
+#else
+# define SK_API_AVAILABLE(...)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkAlign.h b/gfx/skia/skia/include/private/base/SkAlign.h
new file mode 100644
index 0000000000..2b2138ddd4
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkAlign.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAlign_DEFINED
+#define SkAlign_DEFINED
+
+#include "include/private/base/SkAssert.h"
+
+#include <cstddef>
+
+template <typename T> static constexpr T SkAlign2(T x) { return (x + 1) >> 1 << 1; }
+template <typename T> static constexpr T SkAlign4(T x) { return (x + 3) >> 2 << 2; }
+template <typename T> static constexpr T SkAlign8(T x) { return (x + 7) >> 3 << 3; }
+
+template <typename T> static constexpr bool SkIsAlign2(T x) { return 0 == (x & 1); }
+template <typename T> static constexpr bool SkIsAlign4(T x) { return 0 == (x & 3); }
+template <typename T> static constexpr bool SkIsAlign8(T x) { return 0 == (x & 7); }
+
+template <typename T> static constexpr T SkAlignPtr(T x) {
+ return sizeof(void*) == 8 ? SkAlign8(x) : SkAlign4(x);
+}
+template <typename T> static constexpr bool SkIsAlignPtr(T x) {
+ return sizeof(void*) == 8 ? SkIsAlign8(x) : SkIsAlign4(x);
+}
+
+/**
+ * align up to a power of 2
+ */
+static inline constexpr size_t SkAlignTo(size_t x, size_t alignment) {
+ // The same as alignment && SkIsPow2(value), w/o a dependency cycle.
+ SkASSERT(alignment && (alignment & (alignment - 1)) == 0);
+ return (x + alignment - 1) & ~(alignment - 1);
+}
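+
+// Compile-time examples (hypothetical checks, verifiable by inspection):
+//
+//     static_assert(SkAlign2(3) == 4);
+//     static_assert(SkAlign8(9) == 16);
+//     static_assert(SkIsAlign4(12));
+//     static_assert(SkAlignTo(13, 16) == 16);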
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkAlignedStorage.h b/gfx/skia/skia/include/private/base/SkAlignedStorage.h
new file mode 100644
index 0000000000..532ad03978
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkAlignedStorage.h
@@ -0,0 +1,32 @@
+// Copyright 2022 Google LLC
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#ifndef SkAlignedStorage_DEFINED
+#define SkAlignedStorage_DEFINED
+
+#include <cstddef>
+#include <iterator>
+
+template <int N, typename T> class SkAlignedSTStorage {
+public:
+ SkAlignedSTStorage() {}
+ SkAlignedSTStorage(SkAlignedSTStorage&&) = delete;
+ SkAlignedSTStorage(const SkAlignedSTStorage&) = delete;
+ SkAlignedSTStorage& operator=(SkAlignedSTStorage&&) = delete;
+ SkAlignedSTStorage& operator=(const SkAlignedSTStorage&) = delete;
+
+ // Returns void* because this object does not initialize the
+ // memory. Use placement new for types that require a constructor.
+ void* get() { return fStorage; }
+ const void* get() const { return fStorage; }
+
+ // Act as a container of bytes because the storage is uninitialized.
+ std::byte* data() { return fStorage; }
+ const std::byte* data() const { return fStorage; }
+ size_t size() const { return std::size(fStorage); }
+
+private:
+ alignas(T) std::byte fStorage[sizeof(T) * N];
+};
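+
+// Placement-new sketch (hypothetical usage): the storage is raw bytes, so the
+// caller constructs and destroys objects manually.
+//
+//     SkAlignedSTStorage<1, int> storage;
+//     int* value = new (storage.get()) int(42);
+//     // ... use *value; int is trivially destructible, so no destructor call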
+
+#endif // SkAlignedStorage_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkAssert.h b/gfx/skia/skia/include/private/base/SkAssert.h
new file mode 100644
index 0000000000..053e25f22b
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkAssert.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAssert_DEFINED
+#define SkAssert_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkDebug.h" // IWYU pragma: keep
+
+/** Called internally if we hit an unrecoverable error.
+ The platform implementation must not return, but should either throw
+ an exception or otherwise exit.
+*/
+[[noreturn]] SK_API extern void sk_abort_no_print(void);
+SK_API extern bool sk_abort_is_enabled();
+
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ void SkDebugfForDumpStackTrace(const char* data, void* unused);
+ namespace base {
+ void DumpStackTrace(int skip_count, void w(const char*, void*), void* arg);
+ }
+# define SK_DUMP_GOOGLE3_STACK() ::base::DumpStackTrace(0, SkDebugfForDumpStackTrace, nullptr)
+#else
+# define SK_DUMP_GOOGLE3_STACK()
+#endif
+
+#if !defined(SK_ABORT)
+# if defined(SK_BUILD_FOR_WIN)
+ // This style lets Visual Studio follow errors back to the source file.
+# define SK_DUMP_LINE_FORMAT "%s(%d)"
+# else
+# define SK_DUMP_LINE_FORMAT "%s:%d"
+# endif
+# define SK_ABORT(message, ...) \
+ do { if (sk_abort_is_enabled()) { \
+ SkDebugf(SK_DUMP_LINE_FORMAT ": fatal error: \"" message "\"\n", \
+ __FILE__, __LINE__, ##__VA_ARGS__); \
+ SK_DUMP_GOOGLE3_STACK(); \
+ sk_abort_no_print(); \
+ } } while (false)
+#endif
+
+// SkASSERT, SkASSERTF and SkASSERT_RELEASE can be used as stand alone assertion expressions, e.g.
+// uint32_t foo(int x) {
+// SkASSERT(x > 4);
+// return x - 4;
+// }
+// and are also written to be compatible with constexpr functions:
+// constexpr uint32_t foo(int x) {
+// return SkASSERT(x > 4),
+// x - 4;
+// }
+#define SkASSERT_RELEASE(cond) \
+ static_cast<void>( (cond) ? (void)0 : []{ SK_ABORT("assert(%s)", #cond); }() )
+
+#if defined(SK_DEBUG)
+ #define SkASSERT(cond) SkASSERT_RELEASE(cond)
+ #define SkASSERTF(cond, fmt, ...) static_cast<void>( (cond) ? (void)0 : [&]{ \
+ SkDebugf(fmt"\n", ##__VA_ARGS__); \
+ SK_ABORT("assert(%s)", #cond); \
+ }() )
+ #define SkDEBUGFAIL(message) SK_ABORT("%s", message)
+ #define SkDEBUGFAILF(fmt, ...) SK_ABORT(fmt, ##__VA_ARGS__)
+ #define SkAssertResult(cond) SkASSERT(cond)
+#else
+ #define SkASSERT(cond) static_cast<void>(0)
+ #define SkASSERTF(cond, fmt, ...) static_cast<void>(0)
+ #define SkDEBUGFAIL(message)
+ #define SkDEBUGFAILF(fmt, ...)
+
+ // unlike SkASSERT, this macro executes its condition in the non-debug build.
+ // The if is present so that this can be used with functions marked SK_WARN_UNUSED_RESULT.
+ #define SkAssertResult(cond) if (cond) {} do {} while(false)
+#endif
+
+#if !defined(SkUNREACHABLE)
+# if defined(_MSC_VER) && !defined(__clang__)
+# include <intrin.h>
+# define FAST_FAIL_INVALID_ARG 5
+// See https://developercommunity.visualstudio.com/content/problem/1128631/code-flow-doesnt-see-noreturn-with-extern-c.html
+// for why this is wrapped. Hopefully removable after msvc++ 19.27 is no longer supported.
+[[noreturn]] static inline void sk_fast_fail() { __fastfail(FAST_FAIL_INVALID_ARG); }
+# define SkUNREACHABLE sk_fast_fail()
+# else
+# define SkUNREACHABLE __builtin_trap()
+# endif
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkAttributes.h b/gfx/skia/skia/include/private/base/SkAttributes.h
new file mode 100644
index 0000000000..038a800e97
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkAttributes.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAttributes_DEFINED
+#define SkAttributes_DEFINED
+
+#include "include/private/base/SkFeatures.h" // IWYU pragma: keep
+#include "include/private/base/SkLoadUserConfig.h" // IWYU pragma: keep
+
+#if defined(__clang__) || defined(__GNUC__)
+# define SK_ATTRIBUTE(attr) __attribute__((attr))
+#else
+# define SK_ATTRIBUTE(attr)
+#endif
+
+#if !defined(SK_UNUSED)
+# if !defined(__clang__) && defined(_MSC_VER)
+# define SK_UNUSED __pragma(warning(suppress:4189))
+# else
+# define SK_UNUSED SK_ATTRIBUTE(unused)
+# endif
+#endif
+
+#if !defined(SK_WARN_UNUSED_RESULT)
+ #define SK_WARN_UNUSED_RESULT SK_ATTRIBUTE(warn_unused_result)
+#endif
+
+/**
+ * If your judgment is better than the compiler's (i.e. you've profiled it),
+ * you can use SK_ALWAYS_INLINE to force inlining. E.g.
+ * inline void someMethod() { ... } // may not be inlined
+ * SK_ALWAYS_INLINE void someMethod() { ... } // should always be inlined
+ */
+#if !defined(SK_ALWAYS_INLINE)
+# if defined(SK_BUILD_FOR_WIN)
+# define SK_ALWAYS_INLINE __forceinline
+# else
+# define SK_ALWAYS_INLINE SK_ATTRIBUTE(always_inline) inline
+# endif
+#endif
+
+/**
+ * If your judgment is better than the compiler's (i.e. you've profiled it),
+ * you can use SK_NEVER_INLINE to prevent inlining.
+ */
+#if !defined(SK_NEVER_INLINE)
+# if defined(SK_BUILD_FOR_WIN)
+# define SK_NEVER_INLINE __declspec(noinline)
+# else
+# define SK_NEVER_INLINE SK_ATTRIBUTE(noinline)
+# endif
+#endif
+
+/**
+ * Used to annotate a function as taking printf style arguments.
+ * `A` is the (1 based) index of the format string argument.
+ * `B` is the (1 based) index of the first argument used by the format string.
+ */
+#if !defined(SK_PRINTF_LIKE)
+# define SK_PRINTF_LIKE(A, B) SK_ATTRIBUTE(format(printf, (A), (B)))
+#endif
+
+/**
+ * Used to ignore sanitizer warnings.
+ */
+#if !defined(SK_NO_SANITIZE)
+# define SK_NO_SANITIZE(A) SK_ATTRIBUTE(no_sanitize(A))
+#endif
+
+/**
+ * Annotates a class' non-trivial special functions as trivial for the purposes of calls.
+ * Allows a class with a non-trivial destructor to be __is_trivially_relocatable.
+ * Use of this attribute on a public API breaks platform ABI.
+ * Annotated classes may not hold pointers derived from `this`.
+ * Annotated classes must implement move+delete as equivalent to memcpy+free.
+ * Use may require more complete types, as callee destroys.
+ *
+ * https://clang.llvm.org/docs/AttributeReference.html#trivial-abi
+ * https://libcxx.llvm.org/DesignDocs/UniquePtrTrivialAbi.html
+ */
+#if !defined(SK_TRIVIAL_ABI)
+# define SK_TRIVIAL_ABI
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkCPUTypes.h b/gfx/skia/skia/include/private/base/SkCPUTypes.h
new file mode 100644
index 0000000000..a5f60fd3ef
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkCPUTypes.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkCPUTypes_DEFINED
+#define SkCPUTypes_DEFINED
+
+// TODO(bungeman,kjlubick) There are a lot of assumptions throughout the codebase that
+// these types are 32 bits, when they could be more or less. Public APIs should stop
+// using these. Internally, we could use uint_fast8_t and uint_fast16_t, but not in
+// public APIs due to ABI incompatibilities.
+
+/** Fast type for unsigned 8 bits. Use for parameter passing and local
+ variables, not for storage
+*/
+typedef unsigned U8CPU;
+
+/** Fast type for unsigned 16 bits. Use for parameter passing and local
+ variables, not for storage
+*/
+typedef unsigned U16CPU;
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkContainers.h b/gfx/skia/skia/include/private/base/SkContainers.h
new file mode 100644
index 0000000000..2ece73e287
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkContainers.h
@@ -0,0 +1,46 @@
+// Copyright 2022 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#ifndef SkContainers_DEFINED
+#define SkContainers_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkSpan_impl.h"
+
+#include <cstddef>
+#include <cstdint>
+
+class SK_SPI SkContainerAllocator {
+public:
+ SkContainerAllocator(size_t sizeOfT, int maxCapacity)
+ : fSizeOfT{sizeOfT}
+ , fMaxCapacity{maxCapacity} {}
+
+ // allocate will abort on failure. Given a capacity of 0, it will return the empty span.
+ // The bytes allocated are freed using sk_free().
+ SkSpan<std::byte> allocate(int capacity, double growthFactor = 1.0);
+
+private:
+ friend struct SkContainerAllocatorTestingPeer;
+ // All capacity counts will be rounded up to kCapacityMultiple.
+ // TODO: this is a constant from the original SkTArray code. This should be checked somehow.
+ static constexpr int64_t kCapacityMultiple = 8;
+
+ // Rounds up capacity to the next multiple of kCapacityMultiple and pins to fMaxCapacity.
+ size_t roundUpCapacity(int64_t capacity) const;
+
+ // Grows the capacity by growthFactor, being sure to stay within kMinBytes and fMaxCapacity.
+ size_t growthFactorCapacity(int capacity, double growthFactor) const;
+
+ const size_t fSizeOfT;
+ const int64_t fMaxCapacity;
+};
+
+// sk_allocate_canfail returns the empty span on failure. Parameter size must be > 0.
+SkSpan<std::byte> sk_allocate_canfail(size_t size);
+
+// Returns the empty span if size is 0. sk_allocate_throw aborts on failure.
+SkSpan<std::byte> sk_allocate_throw(size_t size);
+
+SK_SPI void sk_report_container_overflow_and_die();
+#endif // SkContainers_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkDebug.h b/gfx/skia/skia/include/private/base/SkDebug.h
new file mode 100644
index 0000000000..2e4810fc1c
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkDebug.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDebug_DEFINED
+#define SkDebug_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkAttributes.h"
+#include "include/private/base/SkLoadUserConfig.h" // IWYU pragma: keep
+
+#if !defined(SkDebugf)
+ void SK_SPI SkDebugf(const char format[], ...) SK_PRINTF_LIKE(1, 2);
+#endif
+
+#if defined(SK_DEBUG)
+ #define SkDEBUGCODE(...) __VA_ARGS__
+ #define SkDEBUGF(...) SkDebugf(__VA_ARGS__)
+#else
+ #define SkDEBUGCODE(...)
+ #define SkDEBUGF(...)
+#endif
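+
+// Usage sketch (hypothetical code): both macros compile away in release builds.
+//
+//     SkDEBUGCODE(int hits = 0;)           // debug-only local
+//     SkDEBUGCODE(hits++;)
+//     SkDEBUGF("hits so far: %d\n", hits); // expands to nothing in release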
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkDeque.h b/gfx/skia/skia/include/private/base/SkDeque.h
new file mode 100644
index 0000000000..fbc6167313
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkDeque.h
@@ -0,0 +1,143 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDeque_DEFINED
+#define SkDeque_DEFINED
+
+#include "include/private/base/SkAPI.h"
+
+#include <cstddef>
+
+/*
+ * The deque class works by blindly creating memory space of a specified element
+ * size. It manages the memory as a doubly linked list of blocks each of which
+ * can contain multiple elements. Pushes and pops add/remove blocks from the
+ * beginning/end of the list as necessary while each block tracks the used
+ * portion of its memory.
+ * One behavior to be aware of is that the pops do not immediately remove an
+ * empty block from the beginning/end of the list (Presumably so push/pop pairs
+ * on the block boundaries don't cause thrashing). This can result in the first/
+ * last element not residing in the first/last block.
+ */
+class SK_API SkDeque {
+public:
+ /**
+ * elemSize specifies the size of each individual element in the deque
+ * allocCount specifies how many elements are to be allocated as a block
+ */
+ explicit SkDeque(size_t elemSize, int allocCount = 1);
+ SkDeque(size_t elemSize, void* storage, size_t storageSize, int allocCount = 1);
+ ~SkDeque();
+
+ bool empty() const { return 0 == fCount; }
+ int count() const { return fCount; }
+ size_t elemSize() const { return fElemSize; }
+
+ const void* front() const { return fFront; }
+ const void* back() const { return fBack; }
+
+ void* front() {
+ return (void*)((const SkDeque*)this)->front();
+ }
+
+ void* back() {
+ return (void*)((const SkDeque*)this)->back();
+ }
+
+ /**
+ * push_front and push_back return a pointer to the memory space
+ * for the new element
+ */
+ void* push_front();
+ void* push_back();
+
+ void pop_front();
+ void pop_back();
+
+private:
+ struct Block;
+
+public:
+ class Iter {
+ public:
+ enum IterStart {
+ kFront_IterStart,
+ kBack_IterStart,
+ };
+
+ /**
+ * Creates an uninitialized iterator. Must be reset()
+ */
+ Iter();
+
+ Iter(const SkDeque& d, IterStart startLoc);
+ void* next();
+ void* prev();
+
+ void reset(const SkDeque& d, IterStart startLoc);
+
+ private:
+ SkDeque::Block* fCurBlock;
+ char* fPos;
+ size_t fElemSize;
+ };
+
+ // Inherit privately from Iter to prevent access to reverse iteration
+ class F2BIter : private Iter {
+ public:
+ F2BIter() {}
+
+ /**
+ * Wrap Iter's 2 parameter ctor to force initialization to the
+ * beginning of the deque
+ */
+ F2BIter(const SkDeque& d) : INHERITED(d, kFront_IterStart) {}
+
+ using Iter::next;
+
+ /**
+ * Wrap Iter::reset to force initialization to the beginning of the
+ * deque
+ */
+ void reset(const SkDeque& d) {
+ this->INHERITED::reset(d, kFront_IterStart);
+ }
+
+ private:
+ using INHERITED = Iter;
+ };
+
+private:
+ // allow unit test to call numBlocksAllocated
+ friend class DequeUnitTestHelper;
+
+ void* fFront;
+ void* fBack;
+
+ Block* fFrontBlock;
+ Block* fBackBlock;
+ size_t fElemSize;
+ void* fInitialStorage;
+ int fCount; // number of elements in the deque
+ int fAllocCount; // number of elements to allocate per block
+
+ Block* allocateBlock(int allocCount);
+ void freeBlock(Block* block);
+
+ /**
+ * This returns the number of chunk blocks allocated by the deque. It
+ * can be used to gauge the effectiveness of the selected allocCount.
+ */
+ int numBlocksAllocated() const;
+
+ SkDeque(const SkDeque&) = delete;
+ SkDeque& operator=(const SkDeque&) = delete;
+};
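+
+// A minimal usage sketch (hypothetical code): SkDeque hands out raw element
+// slots, so the caller writes through the returned pointer.
+//
+//     SkDeque deque(sizeof(int));
+//     *static_cast<int*>(deque.push_back()) = 42;
+//     *static_cast<int*>(deque.push_back()) = 43;
+//
+//     SkDeque::F2BIter iter(deque);
+//     while (void* p = iter.next()) {
+//         SkDebugf("%d\n", *static_cast<int*>(p));  // 42 then 43
+//     }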
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkFeatures.h b/gfx/skia/skia/include/private/base/SkFeatures.h
new file mode 100644
index 0000000000..662bf03211
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkFeatures.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFeatures_DEFINED
+#define SkFeatures_DEFINED
+
+#if !defined(SK_BUILD_FOR_ANDROID) && !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_WIN) && \
+ !defined(SK_BUILD_FOR_UNIX) && !defined(SK_BUILD_FOR_MAC)
+
+ #ifdef __APPLE__
+ #include <TargetConditionals.h>
+ #endif
+
+ #if defined(_WIN32) || defined(__SYMBIAN32__)
+ #define SK_BUILD_FOR_WIN
+ #elif defined(ANDROID) || defined(__ANDROID__)
+ #define SK_BUILD_FOR_ANDROID
+ #elif defined(linux) || defined(__linux) || defined(__FreeBSD__) || \
+ defined(__OpenBSD__) || defined(__sun) || defined(__NetBSD__) || \
+ defined(__DragonFly__) || defined(__Fuchsia__) || \
+ defined(__GLIBC__) || defined(__GNU__) || defined(__unix__)
+ #define SK_BUILD_FOR_UNIX
+ #elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
+ #define SK_BUILD_FOR_IOS
+ #else
+ #define SK_BUILD_FOR_MAC
+ #endif
+#endif // end SK_BUILD_FOR_*
+
+
+#if defined(SK_BUILD_FOR_WIN) && !defined(__clang__)
+ #if !defined(SK_RESTRICT)
+ #define SK_RESTRICT __restrict
+ #endif
+ #if !defined(SK_WARN_UNUSED_RESULT)
+ #define SK_WARN_UNUSED_RESULT
+ #endif
+#endif
+
+#if !defined(SK_RESTRICT)
+ #define SK_RESTRICT __restrict__
+#endif
+
+#if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN)
+ #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+ #define SK_CPU_BENDIAN
+ #elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+ #define SK_CPU_LENDIAN
+ #elif defined(__sparc) || defined(__sparc__) || \
+ defined(_POWER) || defined(__powerpc__) || \
+ defined(__ppc__) || defined(__hppa) || \
+ defined(__PPC__) || defined(__PPC64__) || \
+ defined(_MIPSEB) || defined(__ARMEB__) || \
+ defined(__s390__) || \
+ (defined(__sh__) && defined(__BIG_ENDIAN__)) || \
+ (defined(__ia64) && defined(__BIG_ENDIAN__))
+ #define SK_CPU_BENDIAN
+ #else
+ #define SK_CPU_LENDIAN
+ #endif
+#endif
+
+#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+ #define SK_CPU_X86 1
+#endif
+
+/**
+ * SK_CPU_SSE_LEVEL
+ *
+ * If defined, SK_CPU_SSE_LEVEL should be set to the highest supported level.
+ * On non-intel CPU this should be undefined.
+ */
+#define SK_CPU_SSE_LEVEL_SSE1 10
+#define SK_CPU_SSE_LEVEL_SSE2 20
+#define SK_CPU_SSE_LEVEL_SSE3 30
+#define SK_CPU_SSE_LEVEL_SSSE3 31
+#define SK_CPU_SSE_LEVEL_SSE41 41
+#define SK_CPU_SSE_LEVEL_SSE42 42
+#define SK_CPU_SSE_LEVEL_AVX 51
+#define SK_CPU_SSE_LEVEL_AVX2 52
+#define SK_CPU_SSE_LEVEL_SKX 60
+
+// TODO(brianosman,kjlubick) clean up these checks
+
+// Are we in GCC/Clang?
+#ifndef SK_CPU_SSE_LEVEL
+ // These checks must be done in descending order to ensure we set the highest
+ // available SSE level.
+ #if defined(__AVX512F__) && defined(__AVX512DQ__) && defined(__AVX512CD__) && \
+ defined(__AVX512BW__) && defined(__AVX512VL__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SKX
+ #elif defined(__AVX2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2
+ #elif defined(__AVX__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX
+ #elif defined(__SSE4_2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE42
+ #elif defined(__SSE4_1__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE41
+ #elif defined(__SSSE3__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSSE3
+ #elif defined(__SSE3__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE3
+ #elif defined(__SSE2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #endif
+#endif
+
+// Are we in Visual Studio?
+#ifndef SK_CPU_SSE_LEVEL
+    // These checks must be done in descending order to ensure we set the highest
+    // available SSE level. 64-bit Intel guarantees at least SSE2 support.
+ #if defined(__AVX512F__) && defined(__AVX512DQ__) && defined(__AVX512CD__) && \
+ defined(__AVX512BW__) && defined(__AVX512VL__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SKX
+ #elif defined(__AVX2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2
+ #elif defined(__AVX__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX
+ #elif defined(_M_X64) || defined(_M_AMD64)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #elif defined(_M_IX86_FP)
+ #if _M_IX86_FP >= 2
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #elif _M_IX86_FP == 1
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE1
+ #endif
+ #endif
+#endif
+
+// ARM defines
+#if defined(__arm__) && (!defined(__APPLE__) || !TARGET_IPHONE_SIMULATOR)
+ #define SK_CPU_ARM32
+#elif defined(__aarch64__)
+ #define SK_CPU_ARM64
+#endif
+
+// All 64-bit ARM chips have NEON. Many 32-bit ARM chips do too.
+#if !defined(SK_ARM_HAS_NEON) && defined(__ARM_NEON)
+ #define SK_ARM_HAS_NEON
+#endif
+
+#if defined(__ARM_FEATURE_CRC32)
+ #define SK_ARM_HAS_CRC32
+#endif
+
+#endif // SkFeatures_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkFixed.h b/gfx/skia/skia/include/private/base/SkFixed.h
new file mode 100644
index 0000000000..2c8f2fb56c
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkFixed.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFixed_DEFINED
+#define SkFixed_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMath.h" // IWYU pragma: keep
+#include "include/private/base/SkTPin.h" // IWYU pragma: keep
+
+#include <cstdint>
+
+/** \file SkFixed.h
+
+ Types and macros for 16.16 fixed point
+*/
+
+/** 32 bit signed integer used to represent fractional values with 16 bits to the right of the decimal point
+*/
+typedef int32_t SkFixed;
+#define SK_Fixed1 (1 << 16)
+#define SK_FixedHalf (1 << 15)
+#define SK_FixedQuarter (1 << 14)
+#define SK_FixedMax (0x7FFFFFFF)
+#define SK_FixedMin (-SK_FixedMax)
+#define SK_FixedPI (0x3243F)
+#define SK_FixedSqrt2 (92682)
+#define SK_FixedTanPIOver8 (0x6A0A)
+#define SK_FixedRoot2Over2 (0xB505)
+
+// NOTE: SkFixedToFloat is exact. SkFloatToFixed seems to lack a rounding step. For all fixed-point
+// values, this version is as accurate as possible for (fixed -> float -> fixed). Rounding reduces
+// accuracy if the intermediate floats are in the range that only holds integers (adding 0.5f to an
+// odd integer then snaps to nearest even). Using double for the rounding math gives maximum
+// accuracy for (float -> fixed -> float), but that's usually overkill.
+#define SkFixedToFloat(x) ((x) * 1.52587890625e-5f)
+#define SkFloatToFixed(x) sk_float_saturate2int((x) * SK_Fixed1)
+
+#ifdef SK_DEBUG
+ static inline SkFixed SkFloatToFixed_Check(float x) {
+ int64_t n64 = (int64_t)(x * SK_Fixed1);
+ SkFixed n32 = (SkFixed)n64;
+ SkASSERT(n64 == n32);
+ return n32;
+ }
+#else
+ #define SkFloatToFixed_Check(x) SkFloatToFixed(x)
+#endif
+
+#define SkFixedToDouble(x) ((x) * 1.52587890625e-5)
+#define SkDoubleToFixed(x) ((SkFixed)((x) * SK_Fixed1))
+
+/** Converts an integer to a SkFixed, asserting that the result does not overflow
+ a 32 bit signed integer
+*/
+#ifdef SK_DEBUG
+ inline SkFixed SkIntToFixed(int n)
+ {
+ SkASSERT(n >= -32768 && n <= 32767);
+ // Left shifting a negative value has undefined behavior in C, so we cast to unsigned before
+ // shifting.
+ return (SkFixed)( (unsigned)n << 16 );
+ }
+#else
+ // Left shifting a negative value has undefined behavior in C, so we cast to unsigned before
+ // shifting. Then we force the cast to SkFixed to ensure that the answer is signed (like the
+ // debug version).
+ #define SkIntToFixed(n) (SkFixed)((unsigned)(n) << 16)
+#endif
+
+#define SkFixedRoundToInt(x) (((x) + SK_FixedHalf) >> 16)
+#define SkFixedCeilToInt(x) (((x) + SK_Fixed1 - 1) >> 16)
+#define SkFixedFloorToInt(x) ((x) >> 16)
+
+static inline SkFixed SkFixedRoundToFixed(SkFixed x) {
+ return (SkFixed)( (uint32_t)(x + SK_FixedHalf) & 0xFFFF0000 );
+}
+static inline SkFixed SkFixedCeilToFixed(SkFixed x) {
+ return (SkFixed)( (uint32_t)(x + SK_Fixed1 - 1) & 0xFFFF0000 );
+}
+static inline SkFixed SkFixedFloorToFixed(SkFixed x) {
+ return (SkFixed)( (uint32_t)x & 0xFFFF0000 );
+}
+
+#define SkFixedAve(a, b) (((a) + (b)) >> 1)
+
+// The divide may exceed 32 bits. Clamp to a signed 32 bit result.
+#define SkFixedDiv(numer, denom) \
+ SkToS32(SkTPin<int64_t>((SkLeftShift((int64_t)(numer), 16) / (denom)), SK_MinS32, SK_MaxS32))
+
+static inline SkFixed SkFixedMul(SkFixed a, SkFixed b) {
+ return (SkFixed)((int64_t)a * b >> 16);
+}
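+
+// A small worked example (illustrative): 1.5 * 2 in 16.16 fixed point.
+//   SkFixed a = SK_Fixed1 + SK_FixedHalf;  // 1.5 -> 0x00018000
+//   SkFixed b = SkIntToFixed(2);           // 2.0 -> 0x00020000
+//   SkFixed c = SkFixedMul(a, b);          // 3.0 -> 0x00030000; SkFixedRoundToInt(c) == 3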
+
+///////////////////////////////////////////////////////////////////////////////
+// Platform-specific alternatives to our portable versions.
+
+// The VCVT float-to-fixed instruction is part of the VFPv3 instruction set.
+#if defined(__ARM_VFPV3__)
+ #include <cstring>
+
+    /* This does not handle NaN or other obscurities, but is faster than
+       (int)(x*65536). When built on Android with -Os, it needs to be forced
+       inline or we lose the speed benefit.
+ */
+ SK_ALWAYS_INLINE SkFixed SkFloatToFixed_arm(float x)
+ {
+ int32_t y;
+ asm("vcvt.s32.f32 %0, %0, #16": "+w"(x));
+ std::memcpy(&y, &x, sizeof(y));
+ return y;
+ }
+ #undef SkFloatToFixed
+ #define SkFloatToFixed(x) SkFloatToFixed_arm(x)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define SkFixedToScalar(x) SkFixedToFloat(x)
+#define SkScalarToFixed(x) SkFloatToFixed(x)
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef int64_t SkFixed3232; // 32.32
+
+#define SkFixed3232Max SK_MaxS64
+#define SkFixed3232Min (-SkFixed3232Max)
+
+#define SkIntToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 32))
+#define SkFixed3232ToInt(x) ((int)((x) >> 32))
+#define SkFixedToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 16))
+#define SkFixed3232ToFixed(x) ((SkFixed)((x) >> 16))
+#define SkFloatToFixed3232(x) sk_float_saturate2int64((x) * (65536.0f * 65536.0f))
+#define SkFixed3232ToFloat(x) (x * (1 / (65536.0f * 65536.0f)))
+
+#define SkScalarToFixed3232(x) SkFloatToFixed3232(x)
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkFloatBits.h b/gfx/skia/skia/include/private/base/SkFloatBits.h
new file mode 100644
index 0000000000..37a7b271ae
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkFloatBits.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFloatBits_DEFINED
+#define SkFloatBits_DEFINED
+
+#include "include/private/base/SkMath.h"
+
+#include <cstdint>
+
+/** Convert a sign-bit int (i.e. float interpreted as int) into a 2s complement
+ int. This also converts -0 (0x80000000) to 0. Doing this to a float allows
+ it to be compared using normal C operators (<, <=, etc.)
+*/
+static inline int32_t SkSignBitTo2sCompliment(int32_t x) {
+ if (x < 0) {
+ x &= 0x7FFFFFFF;
+ x = -x;
+ }
+ return x;
+}
+
+/** Convert a 2s complement int to a sign-bit (i.e. int interpreted as float).
+ This undoes the result of SkSignBitTo2sCompliment().
+ */
+static inline int32_t Sk2sComplimentToSignBit(int32_t x) {
+ int sign = x >> 31;
+ // make x positive
+ x = (x ^ sign) - sign;
+ // set the sign bit as needed
+ x |= SkLeftShift(sign, 31);
+ return x;
+}
+
+union SkFloatIntUnion {
+ float fFloat;
+ int32_t fSignBitInt;
+};
+
+// Helper to see a float as its bit pattern (w/o aliasing warnings)
+static inline int32_t SkFloat2Bits(float x) {
+ SkFloatIntUnion data;
+ data.fFloat = x;
+ return data.fSignBitInt;
+}
+
+// Helper to see a bit pattern as a float (w/o aliasing warnings)
+static inline float SkBits2Float(int32_t floatAsBits) {
+ SkFloatIntUnion data;
+ data.fSignBitInt = floatAsBits;
+ return data.fFloat;
+}
+
+constexpr int32_t gFloatBits_exponent_mask = 0x7F800000;
+constexpr int32_t gFloatBits_matissa_mask = 0x007FFFFF;
+
+static inline bool SkFloatBits_IsFinite(int32_t bits) {
+ return (bits & gFloatBits_exponent_mask) != gFloatBits_exponent_mask;
+}
+
+static inline bool SkFloatBits_IsInf(int32_t bits) {
+ return ((bits & gFloatBits_exponent_mask) == gFloatBits_exponent_mask) &&
+ (bits & gFloatBits_matissa_mask) == 0;
+}
+
+/** Return the float as a 2s complement int. Just to be used to compare floats
+ to each other or against positive float-bit-constants (like 0). This does
+ not return the int equivalent of the float, just something cheaper for
+ compares-only.
+ */
+static inline int32_t SkFloatAs2sCompliment(float x) {
+ return SkSignBitTo2sCompliment(SkFloat2Bits(x));
+}
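+
+// For example, SkFloatAs2sCompliment(0.0f) == 0, and because the mapping is monotonic,
+// finite floats a < b satisfy SkFloatAs2sCompliment(a) < SkFloatAs2sCompliment(b).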
+
+/** Return the 2s complement int as a float. This undoes the result of
+    SkFloatAs2sCompliment.
+ */
+static inline float Sk2sComplimentAsFloat(int32_t x) {
+ return SkBits2Float(Sk2sComplimentToSignBit(x));
+}
+
+// Scalar wrappers for float-bit routines
+
+#define SkScalarAs2sCompliment(x) SkFloatAs2sCompliment(x)
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkFloatingPoint.h b/gfx/skia/skia/include/private/base/SkFloatingPoint.h
new file mode 100644
index 0000000000..4b2eb4d897
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkFloatingPoint.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFloatingPoint_DEFINED
+#define SkFloatingPoint_DEFINED
+
+#include "include/private/base/SkAttributes.h"
+#include "include/private/base/SkFloatBits.h"
+#include "include/private/base/SkMath.h"
+
+#include <cfloat>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+
+constexpr float SK_FloatSqrt2 = 1.41421356f;
+constexpr float SK_FloatPI = 3.14159265f;
+constexpr double SK_DoublePI = 3.14159265358979323846264338327950288;
+
+// C++98 cmath std::pow seems to be the earliest portable way to get float pow.
+// However, on Linux including cmath undefines isfinite.
+// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=14608
+static inline float sk_float_pow(float base, float exp) {
+ return powf(base, exp);
+}
+
+#define sk_float_sqrt(x) sqrtf(x)
+#define sk_float_sin(x) sinf(x)
+#define sk_float_cos(x) cosf(x)
+#define sk_float_tan(x) tanf(x)
+#define sk_float_floor(x) floorf(x)
+#define sk_float_ceil(x) ceilf(x)
+#define sk_float_trunc(x) truncf(x)
+#ifdef SK_BUILD_FOR_MAC
+# define sk_float_acos(x) static_cast<float>(acos(x))
+# define sk_float_asin(x) static_cast<float>(asin(x))
+#else
+# define sk_float_acos(x) acosf(x)
+# define sk_float_asin(x) asinf(x)
+#endif
+#define sk_float_atan2(y,x) atan2f(y,x)
+#define sk_float_abs(x) fabsf(x)
+#define sk_float_copysign(x, y) copysignf(x, y)
+#define sk_float_mod(x,y) fmodf(x,y)
+#define sk_float_exp(x) expf(x)
+#define sk_float_log(x) logf(x)
+
+constexpr float sk_float_degrees_to_radians(float degrees) {
+ return degrees * (SK_FloatPI / 180);
+}
+
+constexpr float sk_float_radians_to_degrees(float radians) {
+ return radians * (180 / SK_FloatPI);
+}
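+
+// e.g. sk_float_degrees_to_radians(180) yields SK_FloatPI (up to float rounding), and the two
+// conversions invert each other.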
+
+// floor(double+0.5) vs. floorf(float+0.5f) give comparable performance, but upcasting to double
+// means tricky values like 0.49999997 and 2^24 get rounded correctly. If these were rounded
+// as floorf(x + .5f), they would be 1 higher than expected.
+#define sk_float_round(x) (float)sk_double_round((double)(x))
+
+// We can't find log2f on Android, but maybe that's just a tool bug?
+#ifdef SK_BUILD_FOR_ANDROID
+ static inline float sk_float_log2(float x) {
+ const double inv_ln_2 = 1.44269504088896;
+ return (float)(log(x) * inv_ln_2);
+ }
+#else
+ #define sk_float_log2(x) log2f(x)
+#endif
+
+static inline bool sk_float_isfinite(float x) {
+ return SkFloatBits_IsFinite(SkFloat2Bits(x));
+}
+
+static inline bool sk_floats_are_finite(float a, float b) {
+ return sk_float_isfinite(a) && sk_float_isfinite(b);
+}
+
+static inline bool sk_floats_are_finite(const float array[], int count) {
+ float prod = 0;
+ for (int i = 0; i < count; ++i) {
+ prod *= array[i];
+ }
+ // At this point, prod will either be NaN or 0
+ return prod == 0; // if prod is NaN, this check will return false
+}
+
+static inline bool sk_float_isinf(float x) {
+ return SkFloatBits_IsInf(SkFloat2Bits(x));
+}
+
+#ifdef SK_BUILD_FOR_WIN
+ #define sk_float_isnan(x) _isnan(x)
+#elif defined(__clang__) || defined(__GNUC__)
+ #define sk_float_isnan(x) __builtin_isnan(x)
+#else
+ #define sk_float_isnan(x) isnan(x)
+#endif
+
+#define sk_double_isnan(a) sk_float_isnan(a)
+
+#define SK_MaxS32FitsInFloat 2147483520
+#define SK_MinS32FitsInFloat -SK_MaxS32FitsInFloat
+
+#define SK_MaxS64FitsInFloat (SK_MaxS64 >> (63-24) << (63-24)) // 0x7fffff8000000000
+#define SK_MinS64FitsInFloat -SK_MaxS64FitsInFloat
+
+/**
+ * Return the closest int for the given float. Returns SK_MaxS32FitsInFloat for NaN.
+ */
+static inline int sk_float_saturate2int(float x) {
+ x = x < SK_MaxS32FitsInFloat ? x : SK_MaxS32FitsInFloat;
+ x = x > SK_MinS32FitsInFloat ? x : SK_MinS32FitsInFloat;
+ return (int)x;
+}
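+
+// e.g. sk_float_saturate2int(1e10f) == SK_MaxS32FitsInFloat instead of hitting the undefined
+// float-to-int overflow.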
+
+/**
+ * Return the closest int for the given double. Returns SK_MaxS32 for NaN.
+ */
+static inline int sk_double_saturate2int(double x) {
+ x = x < SK_MaxS32 ? x : SK_MaxS32;
+ x = x > SK_MinS32 ? x : SK_MinS32;
+ return (int)x;
+}
+
+/**
+ * Return the closest int64_t for the given float. Returns SK_MaxS64FitsInFloat for NaN.
+ */
+static inline int64_t sk_float_saturate2int64(float x) {
+ x = x < SK_MaxS64FitsInFloat ? x : SK_MaxS64FitsInFloat;
+ x = x > SK_MinS64FitsInFloat ? x : SK_MinS64FitsInFloat;
+ return (int64_t)x;
+}
+
+#define sk_float_floor2int(x) sk_float_saturate2int(sk_float_floor(x))
+#define sk_float_round2int(x) sk_float_saturate2int(sk_float_round(x))
+#define sk_float_ceil2int(x) sk_float_saturate2int(sk_float_ceil(x))
+
+#define sk_float_floor2int_no_saturate(x) (int)sk_float_floor(x)
+#define sk_float_round2int_no_saturate(x) (int)sk_float_round(x)
+#define sk_float_ceil2int_no_saturate(x) (int)sk_float_ceil(x)
+
+#define sk_double_floor(x) floor(x)
+#define sk_double_round(x) floor((x) + 0.5)
+#define sk_double_ceil(x) ceil(x)
+#define sk_double_floor2int(x) (int)sk_double_floor(x)
+#define sk_double_round2int(x) (int)sk_double_round(x)
+#define sk_double_ceil2int(x) (int)sk_double_ceil(x)
+
+// Cast double to float, ignoring any warning about too-large finite values being cast to float.
+// Clang thinks this is undefined, but it's actually implementation defined to return either
+// the largest float or infinity (one of the two bracketing representable floats). Good enough!
+#ifdef __clang__
+SK_NO_SANITIZE("float-cast-overflow")
+#elif defined(__GNUC__)
+SK_ATTRIBUTE(no_sanitize_undefined)
+#endif
+static inline float sk_double_to_float(double x) {
+ return static_cast<float>(x);
+}
+
+#define SK_FloatNaN std::numeric_limits<float>::quiet_NaN()
+#define SK_FloatInfinity (+std::numeric_limits<float>::infinity())
+#define SK_FloatNegativeInfinity (-std::numeric_limits<float>::infinity())
+
+#define SK_DoubleNaN std::numeric_limits<double>::quiet_NaN()
+
+// Returns false if any of the floats are outside of [0...1]
+// Returns true if count is 0
+bool sk_floats_are_unit(const float array[], size_t count);
+
+static inline float sk_float_rsqrt_portable(float x) { return 1.0f / sk_float_sqrt(x); }
+static inline float sk_float_rsqrt (float x) { return 1.0f / sk_float_sqrt(x); }
+
+// Returns the log2 of the provided value, were that value to be rounded up to the next power of 2.
+// Returns 0 if value <= 0.
+// Never returns a negative number, even if value is NaN.
+//
+// sk_float_nextlog2((-inf..1]) -> 0
+// sk_float_nextlog2((1..2]) -> 1
+// sk_float_nextlog2((2..4]) -> 2
+// sk_float_nextlog2((4..8]) -> 3
+// ...
+static inline int sk_float_nextlog2(float x) {
+ uint32_t bits = (uint32_t)SkFloat2Bits(x);
+ bits += (1u << 23) - 1u; // Increment the exponent for non-powers-of-2.
+ int exp = ((int32_t)bits >> 23) - 127;
+ return exp & ~(exp >> 31); // Return 0 for negative or denormalized floats, and exponents < 0.
+}
+
+// This is the number of significant digits we can print in a string such that when we read that
+// string back we get the floating point number we expect. The minimum value C requires is 6, but
+// most compilers support 9.
+#ifdef FLT_DECIMAL_DIG
+#define SK_FLT_DECIMAL_DIG FLT_DECIMAL_DIG
+#else
+#define SK_FLT_DECIMAL_DIG 9
+#endif
+
+// IEEE defines how float divide behaves for non-finite values and zero-denoms, but C does not,
+// so we have a helper that suppresses the possible undefined-behavior warnings.
+
+#ifdef __clang__
+SK_NO_SANITIZE("float-divide-by-zero")
+#elif defined(__GNUC__)
+SK_ATTRIBUTE(no_sanitize_undefined)
+#endif
+static inline float sk_ieee_float_divide(float numer, float denom) {
+ return numer / denom;
+}
+
+#ifdef __clang__
+SK_NO_SANITIZE("float-divide-by-zero")
+#elif defined(__GNUC__)
+SK_ATTRIBUTE(no_sanitize_undefined)
+#endif
+static inline double sk_ieee_double_divide(double numer, double denom) {
+ return numer / denom;
+}
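+
+// Per IEEE 754, e.g. sk_ieee_float_divide(1.0f, 0.0f) is +infinity and
+// sk_ieee_float_divide(0.0f, 0.0f) is NaN, with no sanitizer complaints.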
+
+// While we clean up divide by zero, we'll replace places that do divide by zero with this TODO.
+static inline float sk_ieee_float_divide_TODO_IS_DIVIDE_BY_ZERO_SAFE_HERE(float n, float d) {
+ return sk_ieee_float_divide(n,d);
+}
+
+static inline float sk_fmaf(float f, float m, float a) {
+#if defined(FP_FAST_FMA)
+ return std::fmaf(f,m,a);
+#else
+ return f*m+a;
+#endif
+}
+
+// Returns true iff the provided number is within a small epsilon of 0.
+bool sk_double_nearly_zero(double a);
+
+// Comparing floating point numbers is complicated. This helper only works if one or none
+// of the two inputs is not very close to zero. It also does not work if both inputs could be NaN.
+// The term "ulps" stands for "units of least precision". Read the following for more nuance:
+// https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+bool sk_doubles_nearly_equal_ulps(double a, double b, uint8_t max_ulps_diff=16);
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkLoadUserConfig.h b/gfx/skia/skia/include/private/base/SkLoadUserConfig.h
new file mode 100644
index 0000000000..397d40bf0c
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkLoadUserConfig.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SK_USER_CONFIG_WAS_LOADED
+
+// Include this to set reasonable defaults (e.g. for SK_CPU_LENDIAN)
+#include "include/private/base/SkFeatures.h"
+
+// Allows embedders that want to disable macros that take arguments to simply
+// define that symbol to be one of these no-op macros.
+#define SK_NOTHING_ARG1(arg1)
+#define SK_NOTHING_ARG2(arg1, arg2)
+#define SK_NOTHING_ARG3(arg1, arg2, arg3)
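+
+// For example (illustrative; SK_MY_TRACE_MACRO is a hypothetical two-argument macro an
+// embedder wants to disable):
+//   #define SK_MY_TRACE_MACRO SK_NOTHING_ARG2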
+
+// IWYU pragma: begin_exports
+
+// Note: SK_USER_CONFIG_HEADER will not work with Bazel builds, as that file will not
+// be specified for the Bazel sandbox.
+#if defined (SK_USER_CONFIG_HEADER)
+ #include SK_USER_CONFIG_HEADER
+#else
+ #include "include/config/SkUserConfig.h"
+#endif
+// IWYU pragma: end_exports
+
+// Checks to make sure the SkUserConfig options do not conflict.
+#if !defined(SK_DEBUG) && !defined(SK_RELEASE)
+ #ifdef NDEBUG
+ #define SK_RELEASE
+ #else
+ #define SK_DEBUG
+ #endif
+#endif
+
+#if defined(SK_DEBUG) && defined(SK_RELEASE)
+# error "cannot define both SK_DEBUG and SK_RELEASE"
+#elif !defined(SK_DEBUG) && !defined(SK_RELEASE)
+# error "must define either SK_DEBUG or SK_RELEASE"
+#endif
+
+#if defined(SK_CPU_LENDIAN) && defined(SK_CPU_BENDIAN)
+# error "cannot define both SK_CPU_LENDIAN and SK_CPU_BENDIAN"
+#elif !defined(SK_CPU_LENDIAN) && !defined(SK_CPU_BENDIAN)
+# error "must define either SK_CPU_LENDIAN or SK_CPU_BENDIAN"
+#endif
+
+#if defined(SK_CPU_BENDIAN) && !defined(I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN)
+ #error "The Skia team is not endian-savvy enough to support big-endian CPUs."
+ #error "If you still want to use Skia,"
+ #error "please define I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN."
+#endif
+
+#define SK_USER_CONFIG_WAS_LOADED
+#endif // SK_USER_CONFIG_WAS_LOADED
diff --git a/gfx/skia/skia/include/private/base/SkMacros.h b/gfx/skia/skia/include/private/base/SkMacros.h
new file mode 100644
index 0000000000..a28602c4fb
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkMacros.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMacros_DEFINED
+#define SkMacros_DEFINED
+
+#include <type_traits>  // SK_MAKE_BITFIELD_OPS below relies on std::underlying_type_t
+
+/*
+ * Usage: SK_MACRO_CONCAT(a, b) to construct the symbol ab
+ *
+ *  SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use it directly.
+ *
+ */
+#define SK_MACRO_CONCAT(X, Y) SK_MACRO_CONCAT_IMPL_PRIV(X, Y)
+#define SK_MACRO_CONCAT_IMPL_PRIV(X, Y) X ## Y
+
+/*
+ * Usage: SK_MACRO_APPEND_LINE(foo) to make foo123, where 123 is the current
+ * line number. Easy way to construct
+ * unique names for local functions or
+ * variables.
+ */
+#define SK_MACRO_APPEND_LINE(name) SK_MACRO_CONCAT(name, __LINE__)
+
+#define SK_MACRO_APPEND_COUNTER(name) SK_MACRO_CONCAT(name, __COUNTER__)
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Can be used to bracket data types that must be dense/packed, e.g. hash keys.
+#if defined(__clang__) // This should work on GCC too, but GCC diagnostic pop didn't seem to work!
+ #define SK_BEGIN_REQUIRE_DENSE _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic error \"-Wpadded\"")
+ #define SK_END_REQUIRE_DENSE _Pragma("GCC diagnostic pop")
+#else
+ #define SK_BEGIN_REQUIRE_DENSE
+ #define SK_END_REQUIRE_DENSE
+#endif
+
+#ifdef MOZ_SKIA
+
+ #ifdef MOZ_ASAN
+ #include "mozilla/MemoryChecking.h"
+ #define SK_INTENTIONALLY_LEAKED(X) MOZ_LSAN_INTENTIONALLY_LEAK_OBJECT(X)
+ #else
+    #define SK_INTENTIONALLY_LEAKED(X) ((void)0)
+ #endif
+
+#else // !MOZ_SKIA
+
+#if defined(__clang__) && defined(__has_feature)
+ // Some compilers have a preprocessor that does not appear to do short-circuit
+ // evaluation as expected
+ #if __has_feature(leak_sanitizer) || __has_feature(address_sanitizer)
+ // Chrome had issues if we tried to include lsan_interface.h ourselves.
+ // https://github.com/llvm/llvm-project/blob/10a35632d55bb05004fe3d0c2d4432bb74897ee7/compiler-rt/include/sanitizer/lsan_interface.h#L26
+extern "C" {
+ void __lsan_ignore_object(const void *p);
+}
+ #define SK_INTENTIONALLY_LEAKED(X) __lsan_ignore_object(X)
+ #else
+ #define SK_INTENTIONALLY_LEAKED(X) ((void)0)
+ #endif
+#else
+ #define SK_INTENTIONALLY_LEAKED(X) ((void)0)
+#endif
+
+#endif // MOZ_SKIA
+
+#define SK_INIT_TO_AVOID_WARNING = 0
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Defines overloaded bitwise operators to make it easier to use an enum as a
+ * bitfield.
+ */
+#define SK_MAKE_BITFIELD_OPS(X) \
+ inline X operator ~(X a) { \
+ using U = std::underlying_type_t<X>; \
+ return (X) (~static_cast<U>(a)); \
+ } \
+ inline X operator |(X a, X b) { \
+ using U = std::underlying_type_t<X>; \
+ return (X) (static_cast<U>(a) | static_cast<U>(b)); \
+ } \
+ inline X& operator |=(X& a, X b) { \
+ return (a = a | b); \
+ } \
+ inline X operator &(X a, X b) { \
+ using U = std::underlying_type_t<X>; \
+ return (X) (static_cast<U>(a) & static_cast<U>(b)); \
+ } \
+ inline X& operator &=(X& a, X b) { \
+ return (a = a & b); \
+ }
+
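+/*
+ *  A minimal usage sketch (Flags is a hypothetical enum; the generated operators rely on
+ *  std::underlying_type_t from <type_traits>):
+ *
+ *      enum class Flags : uint32_t { kNone = 0, kA = 1 << 0, kB = 1 << 1 };
+ *      SK_MAKE_BITFIELD_OPS(Flags)
+ *      Flags f = Flags::kA | Flags::kB;   // both bits set
+ */
+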
+#define SK_DECL_BITFIELD_OPS_FRIENDS(X) \
+ friend X operator ~(X a); \
+ friend X operator |(X a, X b); \
+ friend X& operator |=(X& a, X b); \
+ \
+ friend X operator &(X a, X b); \
+ friend X& operator &=(X& a, X b);
+
+#endif // SkMacros_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkMalloc.h b/gfx/skia/skia/include/private/base/SkMalloc.h
new file mode 100644
index 0000000000..1c0c2e73da
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkMalloc.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMalloc_DEFINED
+#define SkMalloc_DEFINED
+
+#include <cstring>
+
+#include "include/private/base/SkAPI.h"
+
+/*
+ memory wrappers to be implemented by the porting layer (platform)
+*/
+
+
+/** Free memory returned by sk_malloc(). It is safe to pass null. */
+SK_API extern void sk_free(void*);
+
+/**
+ * Called internally if we run out of memory. The platform implementation must
+ * not return, but should either throw an exception or otherwise exit.
+ */
+SK_API extern void sk_out_of_memory(void);
+
+enum {
+ /**
+ * If this bit is set, the returned buffer must be zero-initialized. If this bit is not set
+ * the buffer can be uninitialized.
+ */
+ SK_MALLOC_ZERO_INITIALIZE = 1 << 0,
+
+ /**
+ * If this bit is set, the implementation must throw/crash/quit if the request cannot
+ * be fulfilled. If this bit is not set, then it should return nullptr on failure.
+ */
+ SK_MALLOC_THROW = 1 << 1,
+};
+/**
+ * Return a block of memory (at least 4-byte aligned) of at least the specified size.
+ * If the requested memory cannot be returned, either return nullptr or throw/exit, depending
+ * on the SK_MALLOC_THROW bit. If the allocation succeeds, the memory will be zero-initialized
+ * if the SK_MALLOC_ZERO_INITIALIZE bit was set.
+ *
+ * To free the memory, call sk_free()
+ */
+SK_API extern void* sk_malloc_flags(size_t size, unsigned flags);
+
+/** Same as standard realloc(), but this one never returns null on failure. It will throw
+ * if it fails.
+ * If size is 0, it will call sk_free on buffer and return null. (This behavior is implementation-
+ * defined for normal realloc. We follow what glibc does.)
+ */
+SK_API extern void* sk_realloc_throw(void* buffer, size_t size);
+
+static inline void* sk_malloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW);
+}
+
+static inline void* sk_calloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_ZERO_INITIALIZE);
+}
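+
+// A minimal usage sketch: the _throw variants never return nullptr.
+//   void* p = sk_malloc_throw(64);   // 64 uninitialized bytes; throws/aborts on failure
+//   sk_free(p);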
+
+static inline void* sk_calloc_canfail(size_t size) {
+#if defined(SK_BUILD_FOR_FUZZER)
+ // To reduce the chance of OOM, pretend we can't allocate more than 200kb.
+ if (size > 200000) {
+ return nullptr;
+ }
+#endif
+ return sk_malloc_flags(size, SK_MALLOC_ZERO_INITIALIZE);
+}
+
+// Performs a safe multiply count * elemSize, checking for overflow
+SK_API extern void* sk_calloc_throw(size_t count, size_t elemSize);
+SK_API extern void* sk_malloc_throw(size_t count, size_t elemSize);
+SK_API extern void* sk_realloc_throw(void* buffer, size_t count, size_t elemSize);
+
+/**
+ * These variants return nullptr on failure
+ */
+static inline void* sk_malloc_canfail(size_t size) {
+#if defined(SK_BUILD_FOR_FUZZER)
+ // To reduce the chance of OOM, pretend we can't allocate more than 200kb.
+ if (size > 200000) {
+ return nullptr;
+ }
+#endif
+ return sk_malloc_flags(size, 0);
+}
+SK_API extern void* sk_malloc_canfail(size_t count, size_t elemSize);
+
+// bzero is safer than memset, but we can't rely on it, so... sk_bzero()
+static inline void sk_bzero(void* buffer, size_t size) {
+    // Please see sk_careful_memcpy. It's undefined behavior to call memset(null, 0, 0).
+ if (size) {
+ memset(buffer, 0, size);
+ }
+}
+
+/**
+ * sk_careful_memcpy() is just like memcpy(), but guards against undefined behavior.
+ *
+ * It is undefined behavior to call memcpy() with null dst or src, even if len is 0.
+ * If an optimizer is "smart" enough, it can exploit this to do unexpected things.
+ * memcpy(dst, src, 0);
+ * if (src) {
+ * printf("%x\n", *src);
+ * }
+ * In this code the compiler can assume src is not null and omit the if (src) {...} check,
+ * unconditionally running the printf, crashing the program if src really is null.
+ * Of the compilers we pay attention to only GCC performs this optimization in practice.
+ */
+static inline void* sk_careful_memcpy(void* dst, const void* src, size_t len) {
+ // When we pass >0 len we had better already be passing valid pointers.
+ // So we just need to skip calling memcpy when len == 0.
+ if (len) {
+ memcpy(dst,src,len);
+ }
+ return dst;
+}
+
+static inline void* sk_careful_memmove(void* dst, const void* src, size_t len) {
+ // When we pass >0 len we had better already be passing valid pointers.
+    // So we just need to skip calling memmove when len == 0.
+ if (len) {
+ memmove(dst,src,len);
+ }
+ return dst;
+}
+
+static inline int sk_careful_memcmp(const void* a, const void* b, size_t len) {
+ // When we pass >0 len we had better already be passing valid pointers.
+ // So we just need to skip calling memcmp when len == 0.
+ if (len == 0) {
+ return 0; // we treat zero-length buffers as "equal"
+ }
+ return memcmp(a, b, len);
+}
+
+#endif // SkMalloc_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkMath.h b/gfx/skia/skia/include/private/base/SkMath.h
new file mode 100644
index 0000000000..34bfa739f7
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkMath.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMath_DEFINED
+#define SkMath_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkCPUTypes.h"
+
+#include <cstdint>
+#include <climits>
+
+// Max and min signed 16-bit values
+static constexpr int16_t SK_MaxS16 = INT16_MAX;
+static constexpr int16_t SK_MinS16 = -SK_MaxS16;
+
+static constexpr int32_t SK_MaxS32 = INT32_MAX;
+static constexpr int32_t SK_MinS32 = -SK_MaxS32;
+static constexpr int32_t SK_NaN32 = INT32_MIN;
+
+static constexpr int64_t SK_MaxS64 = INT64_MAX;
+static constexpr int64_t SK_MinS64 = -SK_MaxS64;
+
+// 64bit -> 32bit utilities
+
+// Handy util that can be passed two ints, and will automatically promote to
+// 64 bits before the multiply, so the caller doesn't have to remember to cast,
+// e.g. (int64_t)a * b;
+static inline int64_t sk_64_mul(int64_t a, int64_t b) {
+ return a * b;
+}
+
+static inline constexpr int32_t SkLeftShift(int32_t value, int32_t shift) {
+ return (int32_t) ((uint32_t) value << shift);
+}
+
+static inline constexpr int64_t SkLeftShift(int64_t value, int32_t shift) {
+ return (int64_t) ((uint64_t) value << shift);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Returns true if value is a power of 2. Does not explicitly check for
+ * value <= 0.
+ */
+template <typename T> constexpr inline bool SkIsPow2(T value) {
+ return (value & (value - 1)) == 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Return a*b/((1 << shift) - 1), rounding any fractional bits.
+ * Only valid if a and b are unsigned and <= 32767 and shift is > 0 and <= 8
+ */
+static inline unsigned SkMul16ShiftRound(U16CPU a, U16CPU b, int shift) {
+ SkASSERT(a <= 32767);
+ SkASSERT(b <= 32767);
+ SkASSERT(shift > 0 && shift <= 8);
+ unsigned prod = a*b + (1 << (shift - 1));
+ return (prod + (prod >> shift)) >> shift;
+}
+
+/**
+ * Return a*b/255, rounding any fractional bits.
+ * Only valid if a and b are unsigned and <= 32767.
+ */
+static inline U8CPU SkMulDiv255Round(U16CPU a, U16CPU b) {
+ return SkMul16ShiftRound(a, b, 8);
+}
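+
+// For example, SkMulDiv255Round(128, 255) == 128 and SkMulDiv255Round(255, 255) == 255,
+// i.e. multiplying by an alpha of 255 leaves the value unchanged.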
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkMutex.h b/gfx/skia/skia/include/private/base/SkMutex.h
new file mode 100644
index 0000000000..4452beb912
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkMutex.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMutex_DEFINED
+#define SkMutex_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkSemaphore.h"
+#include "include/private/base/SkThreadAnnotations.h"
+#include "include/private/base/SkThreadID.h"
+
+class SK_CAPABILITY("mutex") SkMutex {
+public:
+ constexpr SkMutex() = default;
+
+ ~SkMutex() {
+ this->assertNotHeld();
+ }
+
+ void acquire() SK_ACQUIRE() {
+ fSemaphore.wait();
+ SkDEBUGCODE(fOwner = SkGetThreadID();)
+ }
+
+ void release() SK_RELEASE_CAPABILITY() {
+ this->assertHeld();
+ SkDEBUGCODE(fOwner = kIllegalThreadID;)
+ fSemaphore.signal();
+ }
+
+ void assertHeld() SK_ASSERT_CAPABILITY(this) {
+ SkASSERT(fOwner == SkGetThreadID());
+ }
+
+ void assertNotHeld() {
+ SkASSERT(fOwner == kIllegalThreadID);
+ }
+
+private:
+ SkSemaphore fSemaphore{1};
+ SkDEBUGCODE(SkThreadID fOwner{kIllegalThreadID};)
+};
+
+class SK_SCOPED_CAPABILITY SkAutoMutexExclusive {
+public:
+ SkAutoMutexExclusive(SkMutex& mutex) SK_ACQUIRE(mutex) : fMutex(mutex) { fMutex.acquire(); }
+ ~SkAutoMutexExclusive() SK_RELEASE_CAPABILITY() { fMutex.release(); }
+
+ SkAutoMutexExclusive(const SkAutoMutexExclusive&) = delete;
+ SkAutoMutexExclusive(SkAutoMutexExclusive&&) = delete;
+
+ SkAutoMutexExclusive& operator=(const SkAutoMutexExclusive&) = delete;
+ SkAutoMutexExclusive& operator=(SkAutoMutexExclusive&&) = delete;
+
+private:
+ SkMutex& fMutex;
+};
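+
+// A minimal usage sketch (names are illustrative):
+//   static SkMutex gCacheMutex;
+//   void update_cache() {
+//       SkAutoMutexExclusive lock(gCacheMutex);  // acquired here, released at end of scope
+//       // ... touch guarded state ...
+//   }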
+
+#endif // SkMutex_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkNoncopyable.h b/gfx/skia/skia/include/private/base/SkNoncopyable.h
new file mode 100644
index 0000000000..ec4a4e5161
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkNoncopyable.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNoncopyable_DEFINED
+#define SkNoncopyable_DEFINED
+
+#include "include/private/base/SkAPI.h"
+
+/** \class SkNoncopyable (DEPRECATED)
+
+ SkNoncopyable is the base class for objects that do not want to
+ be copied. It hides its copy-constructor and its assignment-operator.
+*/
+class SK_API SkNoncopyable {
+public:
+ SkNoncopyable() = default;
+
+ SkNoncopyable(SkNoncopyable&&) = default;
+ SkNoncopyable& operator =(SkNoncopyable&&) = default;
+
+private:
+ SkNoncopyable(const SkNoncopyable&) = delete;
+ SkNoncopyable& operator=(const SkNoncopyable&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkOnce.h b/gfx/skia/skia/include/private/base/SkOnce.h
new file mode 100644
index 0000000000..97ce6b6311
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkOnce.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOnce_DEFINED
+#define SkOnce_DEFINED
+
+#include "include/private/base/SkThreadAnnotations.h"
+
+#include <atomic>
+#include <cstdint>
+#include <utility>
+
+// SkOnce provides call-once guarantees for Skia, much like std::once_flag/std::call_once().
+//
+// There should be no particularly error-prone gotcha use cases when using SkOnce.
+// It works correctly as a class member, a local, a global, a function-scoped static, whatever.
+
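+// A minimal usage sketch (expensive_setup is a hypothetical function):
+//
+//   static SkOnce once;
+//   once([] { expensive_setup(); });  // runs at most once, even when raced by many threads
+//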
+class SkOnce {
+public:
+ constexpr SkOnce() = default;
+
+ template <typename Fn, typename... Args>
+ void operator()(Fn&& fn, Args&&... args) {
+ auto state = fState.load(std::memory_order_acquire);
+
+ if (state == Done) {
+ return;
+ }
+
+ // If it looks like no one has started calling fn(), try to claim that job.
+ if (state == NotStarted && fState.compare_exchange_strong(state, Claimed,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed)) {
+ // Great! We'll run fn() then notify the other threads by releasing Done into fState.
+ fn(std::forward<Args>(args)...);
+ return fState.store(Done, std::memory_order_release);
+ }
+
+ // Some other thread is calling fn().
+ // We'll just spin here acquiring until it releases Done into fState.
+ SK_POTENTIALLY_BLOCKING_REGION_BEGIN;
+ while (fState.load(std::memory_order_acquire) != Done) { /*spin*/ }
+ SK_POTENTIALLY_BLOCKING_REGION_END;
+ }
+
+private:
+    enum State : uint8_t { NotStarted, Claimed, Done };
+ std::atomic<uint8_t> fState{NotStarted};
+};
+
+#endif // SkOnce_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkPathEnums.h b/gfx/skia/skia/include/private/base/SkPathEnums.h
new file mode 100644
index 0000000000..642bbb3489
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkPathEnums.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * This file contains private enums related to paths. See also skbug.com/10670
+ */
+
+#ifndef SkPathEnums_DEFINED
+#define SkPathEnums_DEFINED
+
+enum class SkPathConvexity {
+ kConvex,
+ kConcave,
+ kUnknown,
+};
+
+enum class SkPathFirstDirection {
+ kCW, // == SkPathDirection::kCW
+ kCCW, // == SkPathDirection::kCCW
+ kUnknown,
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkSafe32.h b/gfx/skia/skia/include/private/base/SkSafe32.h
new file mode 100644
index 0000000000..5ba4c2f9a4
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkSafe32.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSafe32_DEFINED
+#define SkSafe32_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkMath.h"
+
+#include <cstdint>
+
+static constexpr int32_t Sk64_pin_to_s32(int64_t x) {
+ return x < SK_MinS32 ? SK_MinS32 : (x > SK_MaxS32 ? SK_MaxS32 : (int32_t)x);
+}
+
+static constexpr int32_t Sk32_sat_add(int32_t a, int32_t b) {
+ return Sk64_pin_to_s32((int64_t)a + (int64_t)b);
+}
+
+static constexpr int32_t Sk32_sat_sub(int32_t a, int32_t b) {
+ return Sk64_pin_to_s32((int64_t)a - (int64_t)b);
+}
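+
+// For example, Sk32_sat_add(SK_MaxS32, 1) == SK_MaxS32 rather than wrapping around.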
+
+// To avoid UBSAN complaints about 2's complement overflows
+//
+static constexpr int32_t Sk32_can_overflow_add(int32_t a, int32_t b) {
+ return (int32_t)((uint32_t)a + (uint32_t)b);
+}
+static constexpr int32_t Sk32_can_overflow_sub(int32_t a, int32_t b) {
+ return (int32_t)((uint32_t)a - (uint32_t)b);
+}
+
+/**
+ * This is a 'safe' abs for 32-bit integers that asserts when undefined behavior would occur.
+ * SkTAbs (in SkTemplates.h) is a general purpose absolute-value function.
+ */
+static inline int32_t SkAbs32(int32_t value) {
+ SkASSERT(value != SK_NaN32); // The most negative int32_t can't be negated.
+ if (value < 0) {
+ value = -value;
+ }
+ return value;
+}
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkSemaphore.h b/gfx/skia/skia/include/private/base/SkSemaphore.h
new file mode 100644
index 0000000000..f78ee86625
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkSemaphore.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSemaphore_DEFINED
+#define SkSemaphore_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkOnce.h"
+#include "include/private/base/SkThreadAnnotations.h"
+
+#include <algorithm>
+#include <atomic>
+
+class SkSemaphore {
+public:
+ constexpr SkSemaphore(int count = 0) : fCount(count), fOSSemaphore(nullptr) {}
+
+    // Clean up the underlying OS semaphore.
+ SK_SPI ~SkSemaphore();
+
+ // Increment the counter n times.
+ // Generally it's better to call signal(n) instead of signal() n times.
+ void signal(int n = 1);
+
+ // Decrement the counter by 1,
+ // then if the counter is < 0, sleep this thread until the counter is >= 0.
+ void wait();
+
+ // If the counter is positive, decrement it by 1 and return true, otherwise return false.
+ SK_SPI bool try_wait();
+
+private:
+ // This implementation follows the general strategy of
+ // 'A Lightweight Semaphore with Partial Spinning'
+ // found here
+ // http://preshing.com/20150316/semaphores-are-surprisingly-versatile/
+ // That article (and entire blog) are very much worth reading.
+ //
+ // We wrap an OS-provided semaphore with a user-space atomic counter that
+ // lets us avoid interacting with the OS semaphore unless strictly required:
+ // moving the count from >=0 to <0 or vice-versa, i.e. sleeping or waking threads.
+ struct OSSemaphore;
+
+ SK_SPI void osSignal(int n);
+ SK_SPI void osWait();
+
+ std::atomic<int> fCount;
+ SkOnce fOSSemaphoreOnce;
+ OSSemaphore* fOSSemaphore;
+};
+
+inline void SkSemaphore::signal(int n) {
+ int prev = fCount.fetch_add(n, std::memory_order_release);
+
+ // We only want to call the OS semaphore when our logical count crosses
+ // from <0 to >=0 (when we need to wake sleeping threads).
+ //
+ // This is easiest to think about with specific examples of prev and n.
+ // If n == 5 and prev == -3, there are 3 threads sleeping and we signal
+ // std::min(-(-3), 5) == 3 times on the OS semaphore, leaving the count at 2.
+ //
+ // If prev >= 0, no threads are waiting, std::min(-prev, n) is always <= 0,
+ // so we don't call the OS semaphore, leaving the count at (prev + n).
+ int toSignal = std::min(-prev, n);
+ if (toSignal > 0) {
+ this->osSignal(toSignal);
+ }
+}
+
+inline void SkSemaphore::wait() {
+ // Since this fetches the value before the subtract, zero and below means that there are no
+ // resources left, so the thread needs to wait.
+ if (fCount.fetch_sub(1, std::memory_order_acquire) <= 0) {
+ SK_POTENTIALLY_BLOCKING_REGION_BEGIN;
+ this->osWait();
+ SK_POTENTIALLY_BLOCKING_REGION_END;
+ }
+}
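+
+// A minimal usage sketch: handing work from a producer thread to a worker thread.
+//   SkSemaphore sem;
+//   // worker:   sem.wait();    // blocks until the producer signals
+//   // producer: sem.signal();  // wakes the worker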
+
+#endif // SkSemaphore_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkSpan_impl.h b/gfx/skia/skia/include/private/base/SkSpan_impl.h
new file mode 100644
index 0000000000..5f31a651bb
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkSpan_impl.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpan_DEFINED
+#define SkSpan_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkTo.h"
+
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <utility>
+
+// Having this be an export works around IWYU churn related to
+// https://github.com/include-what-you-use/include-what-you-use/issues/1121
+#include <type_traits> // IWYU pragma: export
+
+// Add a macro to check the lifetime of initializer_list arguments. An initializer_list has a very
+// short life span, so it should only be used as a parameter and never stored in a variable.
+#if defined(__clang__) && defined(__has_cpp_attribute) && __has_cpp_attribute(clang::lifetimebound)
+#define SK_CHECK_IL_LIFETIME [[clang::lifetimebound]]
+#else
+#define SK_CHECK_IL_LIFETIME
+#endif
+
+/**
+ * SkSpan holds a reference to contiguous data of type T along with a count. SkSpan does not own
+ * the data itself but is merely a reference, therefore you must take care with the lifetime of
+ * the underlying data.
+ *
+ * SkSpan is a count and a pointer into an existing array, or any data type that stores its data in
+ * contiguous memory like std::vector. Any container that works with std::size() and std::data()
+ * can be used.
+ *
+ * SkSpan makes a convenient parameter for a routine to accept array-like things, allowing you to
+ * write the routine without overloads for all the different container types.
+ *
+ * Example:
+ * void routine(SkSpan<const int> a) { ... }
+ *
+ * std::vector v = {1, 2, 3, 4, 5};
+ *
+ *  routine(v);
+ *
+ * A word of caution when working with initializer_list: an initializer_list has a lifetime that
+ * is limited to the current statement. The following is correct and safe:
+ *
+ * Example:
+ * routine({1,2,3,4,5});
+ *
+ * The following is undefined, and will result in erratic execution:
+ *
+ * Bad Example:
+ * initializer_list l = {1, 2, 3, 4, 5}; // The data behind l dies at the ;.
+ * routine(l);
+ */
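+// A small sketch of the slicing API (values shown are for the array below):
+//   int v[] = {1, 2, 3, 4, 5};
+//   SkSpan<int> s(v);                    // s.size() == 5
+//   SkSpan<int> tail = s.last(2);        // {4, 5}
+//   SkSpan<int> mid  = s.subspan(1, 3);  // {2, 3, 4}
+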
+template <typename T>
+class SkSpan {
+public:
+ constexpr SkSpan() : fPtr{nullptr}, fSize{0} {}
+
+ template <typename Integer, std::enable_if_t<std::is_integral_v<Integer>, bool> = true>
+ constexpr SkSpan(T* ptr, Integer size) : fPtr{ptr}, fSize{SkToSizeT(size)} {
+ SkASSERT(ptr || fSize == 0); // disallow nullptr + a nonzero size
+ SkASSERT(fSize < kMaxSize);
+ }
+ template <typename U, typename = std::enable_if_t<std::is_same_v<const U, T>>>
+ constexpr SkSpan(const SkSpan<U>& that) : fPtr(std::data(that)), fSize(std::size(that)) {}
+ constexpr SkSpan(const SkSpan& o) = default;
+ template<size_t N> constexpr SkSpan(T(&a)[N]) : SkSpan(a, N) { }
+ template<typename Container>
+ constexpr SkSpan(Container& c) : SkSpan(std::data(c), std::size(c)) { }
+ SkSpan(std::initializer_list<T> il SK_CHECK_IL_LIFETIME)
+ : SkSpan(std::data(il), std::size(il)) {}
+
+ constexpr SkSpan& operator=(const SkSpan& that) = default;
+
+ constexpr T& operator [] (size_t i) const {
+ SkASSERT(i < this->size());
+ return fPtr[i];
+ }
+ constexpr T& front() const { return fPtr[0]; }
+ constexpr T& back() const { return fPtr[fSize - 1]; }
+ constexpr T* begin() const { return fPtr; }
+ constexpr T* end() const { return fPtr + fSize; }
+ constexpr auto rbegin() const { return std::make_reverse_iterator(this->end()); }
+ constexpr auto rend() const { return std::make_reverse_iterator(this->begin()); }
+ constexpr T* data() const { return this->begin(); }
+ constexpr size_t size() const { return fSize; }
+ constexpr bool empty() const { return fSize == 0; }
+ constexpr size_t size_bytes() const { return fSize * sizeof(T); }
+ constexpr SkSpan<T> first(size_t prefixLen) const {
+ SkASSERT(prefixLen <= this->size());
+ return SkSpan{fPtr, prefixLen};
+ }
+ constexpr SkSpan<T> last(size_t postfixLen) const {
+ SkASSERT(postfixLen <= this->size());
+ return SkSpan{fPtr + (this->size() - postfixLen), postfixLen};
+ }
+ constexpr SkSpan<T> subspan(size_t offset) const {
+ return this->subspan(offset, this->size() - offset);
+ }
+ constexpr SkSpan<T> subspan(size_t offset, size_t count) const {
+ SkASSERT(offset <= this->size());
+ SkASSERT(count <= this->size() - offset);
+ return SkSpan{fPtr + offset, count};
+ }
+
+private:
+ static const constexpr size_t kMaxSize = std::numeric_limits<size_t>::max() / sizeof(T);
+ T* fPtr;
+ size_t fSize;
+};
+
+template <typename Container>
+SkSpan(Container&) ->
+ SkSpan<std::remove_pointer_t<decltype(std::data(std::declval<Container&>()))>>;
+
+template <typename T>
+SkSpan(std::initializer_list<T>) ->
+ SkSpan<std::remove_pointer_t<decltype(std::data(std::declval<std::initializer_list<T>>()))>>;
+
+#endif // SkSpan_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkTArray.h b/gfx/skia/skia/include/private/base/SkTArray.h
new file mode 100644
index 0000000000..635d04e2a8
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTArray.h
@@ -0,0 +1,696 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTArray_DEFINED
+#define SkTArray_DEFINED
+
+#include "include/private/base/SkAlignedStorage.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkAttributes.h"
+#include "include/private/base/SkContainers.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkSpan_impl.h"
+#include "include/private/base/SkTo.h"
+#include "include/private/base/SkTypeTraits.h" // IWYU pragma: keep
+
+#include <algorithm>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <initializer_list>
+#include <new>
+#include <utility>
+
+namespace skia_private {
+/** TArray<T> implements a typical, mostly std::vector-like array.
+ Each T will be default-initialized on allocation, and ~T will be called on destruction.
+
+ MEM_MOVE controls the behavior when a T needs to be moved (e.g. when the array is resized)
+ - true: T will be bit-copied via memcpy.
+ - false: T will be moved via move-constructors.
+*/
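+// A minimal usage sketch:
+//   skia_private::TArray<int> a;
+//   a.push_back(1);
+//   a.push_back_n(3, 7);   // appends three copies of 7
+//   SkASSERT(a.size() == 4 && a.back() == 7);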
+template <typename T, bool MEM_MOVE = sk_is_trivially_relocatable_v<T>> class TArray {
+public:
+ using value_type = T;
+
+ /**
+ * Creates an empty array with no initial storage
+ */
+ TArray() : fOwnMemory(true), fCapacity{0} {}
+
+ /**
+ * Creates an empty array that will preallocate space for reserveCount
+ * elements.
+ */
+ explicit TArray(int reserveCount) : TArray() { this->reserve_back(reserveCount); }
+
+ /**
+ * Copies one array to another. The new array will be heap allocated.
+ */
+ TArray(const TArray& that) : TArray(that.fData, that.fSize) {}
+
+ TArray(TArray&& that) {
+ if (that.fOwnMemory) {
+ this->setData(that);
+ that.setData({});
+ } else {
+ this->initData(that.fSize);
+ that.move(fData);
+ }
+ fSize = std::exchange(that.fSize, 0);
+ }
+
+ /**
+ * Creates a TArray by copying contents of a standard C array. The new
+ * array will be heap allocated. Be careful not to use this constructor
+ * when you really want the (void*, int) version.
+ */
+ TArray(const T* array, int count) {
+ this->initData(count);
+ this->copy(array);
+ }
+
+ /**
+ * Creates a TArray by copying contents of an initializer list.
+ */
+ TArray(std::initializer_list<T> data) : TArray(data.begin(), data.size()) {}
+
+ TArray& operator=(const TArray& that) {
+ if (this == &that) {
+ return *this;
+ }
+ this->clear();
+ this->checkRealloc(that.size(), kExactFit);
+ fSize = that.fSize;
+ this->copy(that.fData);
+ return *this;
+ }
+ TArray& operator=(TArray&& that) {
+ if (this != &that) {
+ this->clear();
+ if (that.fOwnMemory) {
+ // The storage is on the heap, so move the data pointer.
+ if (fOwnMemory) {
+ sk_free(fData);
+ }
+
+ fData = std::exchange(that.fData, nullptr);
+
+ // Can't use exchange with bitfields.
+ fCapacity = that.fCapacity;
+ that.fCapacity = 0;
+
+ fOwnMemory = true;
+ } else {
+ // The data is stored inline in that, so move it element-by-element.
+ this->checkRealloc(that.size(), kExactFit);
+ that.move(fData);
+ }
+ fSize = std::exchange(that.fSize, 0);
+ }
+ return *this;
+ }
+
+ ~TArray() {
+ this->destroyAll();
+ if (fOwnMemory) {
+ sk_free(fData);
+ }
+ }
+
+ /**
+ * Resets to size() = n newly constructed T objects and resets any reserve count.
+ */
+ void reset(int n) {
+ SkASSERT(n >= 0);
+ this->clear();
+ this->checkRealloc(n, kExactFit);
+ fSize = n;
+ for (int i = 0; i < this->size(); ++i) {
+ new (fData + i) T;
+ }
+ }
+
+ /**
+ * Resets to a copy of a C array and resets any reserve count.
+ */
+ void reset(const T* array, int count) {
+ SkASSERT(count >= 0);
+ this->clear();
+ this->checkRealloc(count, kExactFit);
+ fSize = count;
+ this->copy(array);
+ }
+
+ /**
+ * Ensures there is enough reserved space for n elements.
+ */
+ void reserve(int n) {
+ SkASSERT(n >= 0);
+ if (n > this->size()) {
+ this->checkRealloc(n - this->size(), kGrowing);
+ }
+ }
+
+ /**
+     * Ensures there is enough reserved space for n additional elements. This is guaranteed at least
+ * until the array size grows above n and subsequently shrinks below n, any version of reset()
+ * is called, or reserve_back() is called again.
+ */
+ void reserve_back(int n) {
+ SkASSERT(n >= 0);
+ if (n > 0) {
+ this->checkRealloc(n, kExactFit);
+ }
+ }
+
+ void removeShuffle(int n) {
+ SkASSERT(n < this->size());
+ int newCount = fSize - 1;
+ fSize = newCount;
+ fData[n].~T();
+ if (n != newCount) {
+ this->move(n, newCount);
+ }
+ }
+
+    // Returns true if the array is empty.
+ bool empty() const { return fSize == 0; }
+
+ /**
+ * Adds 1 new default-initialized T value and returns it by reference. Note
+ * the reference only remains valid until the next call that adds or removes
+ * elements.
+ */
+ T& push_back() {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T;
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize the new item
+ */
+ T& push_back(const T& t) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(t);
+ }
+
+ /**
+ * Version of above that uses a move constructor to initialize the new item
+ */
+ T& push_back(T&& t) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(std::move(t));
+ }
+
+ /**
+ * Construct a new T at the back of this array.
+ */
+ template<class... Args> T& emplace_back(Args&&... args) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(std::forward<Args>(args)...);
+ }
+
+ /**
+ * Allocates n more default-initialized T values, and returns the address of
+ * the start of that new range. Note: this address is only valid until the
+ * next API call made on the array that might add or remove elements.
+ */
+ T* push_back_n(int n) {
+ SkASSERT(n >= 0);
+ T* newTs = TCast(this->push_back_raw(n));
+ for (int i = 0; i < n; ++i) {
+ new (&newTs[i]) T;
+ }
+ return newTs;
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize all n items
+ * to the same T.
+ */
+ T* push_back_n(int n, const T& t) {
+ SkASSERT(n >= 0);
+ T* newTs = TCast(this->push_back_raw(n));
+ for (int i = 0; i < n; ++i) {
+ new (&newTs[i]) T(t);
+ }
+        return newTs;
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize the n items
+ * to separate T values.
+ */
+ T* push_back_n(int n, const T t[]) {
+ SkASSERT(n >= 0);
+ this->checkRealloc(n, kGrowing);
+ T* end = this->end();
+ for (int i = 0; i < n; ++i) {
+ new (end + i) T(t[i]);
+ }
+ fSize += n;
+ return end;
+ }
+
+ /**
+ * Version of above that uses the move constructor to set n items.
+ */
+ T* move_back_n(int n, T* t) {
+ SkASSERT(n >= 0);
+ this->checkRealloc(n, kGrowing);
+ T* end = this->end();
+ for (int i = 0; i < n; ++i) {
+ new (end + i) T(std::move(t[i]));
+ }
+ fSize += n;
+ return end;
+ }
+
+ /**
+ * Removes the last element. Not safe to call when size() == 0.
+ */
+ void pop_back() {
+ SkASSERT(fSize > 0);
+ --fSize;
+ fData[fSize].~T();
+ }
+
+ /**
+ * Removes the last n elements. Not safe to call when size() < n.
+ */
+ void pop_back_n(int n) {
+ SkASSERT(n >= 0);
+ SkASSERT(this->size() >= n);
+ int i = fSize;
+ while (i-- > fSize - n) {
+ (*this)[i].~T();
+ }
+ fSize -= n;
+ }
+
+ /**
+ * Pushes or pops from the back to resize. Pushes will be default
+ * initialized.
+ */
+ void resize_back(int newCount) {
+ SkASSERT(newCount >= 0);
+
+ if (newCount > this->size()) {
+ this->push_back_n(newCount - fSize);
+ } else if (newCount < this->size()) {
+ this->pop_back_n(fSize - newCount);
+ }
+ }
+
+ /** Swaps the contents of this array with that array. Does a pointer swap if possible,
+ otherwise copies the T values. */
+ void swap(TArray& that) {
+ using std::swap;
+ if (this == &that) {
+ return;
+ }
+ if (fOwnMemory && that.fOwnMemory) {
+ swap(fData, that.fData);
+ swap(fSize, that.fSize);
+
+ // Can't use swap because fCapacity is a bit field.
+ auto allocCount = fCapacity;
+ fCapacity = that.fCapacity;
+ that.fCapacity = allocCount;
+ } else {
+ // This could be more optimal...
+ TArray copy(std::move(that));
+ that = std::move(*this);
+ *this = std::move(copy);
+ }
+ }
+
+ T* begin() {
+ return fData;
+ }
+ const T* begin() const {
+ return fData;
+ }
+
+    // It's safe to use fData + fSize because if fData is nullptr then adding 0 is
+    // valid and returns nullptr. See [expr.add] in the C++ standard.
+ T* end() {
+ if (fData == nullptr) {
+ SkASSERT(fSize == 0);
+ }
+ return fData + fSize;
+ }
+ const T* end() const {
+ if (fData == nullptr) {
+ SkASSERT(fSize == 0);
+ }
+ return fData + fSize;
+ }
+ T* data() { return fData; }
+ const T* data() const { return fData; }
+ int size() const { return fSize; }
+ size_t size_bytes() const { return this->bytes(fSize); }
+ void resize(size_t count) { this->resize_back((int)count); }
+
+ void clear() {
+ this->destroyAll();
+ fSize = 0;
+ }
+
+ void shrink_to_fit() {
+ if (!fOwnMemory || fSize == fCapacity) {
+ return;
+ }
+ if (fSize == 0) {
+ sk_free(fData);
+ fData = nullptr;
+ fCapacity = 0;
+ } else {
+ SkSpan<std::byte> allocation = Allocate(fSize);
+ this->move(TCast(allocation.data()));
+ if (fOwnMemory) {
+ sk_free(fData);
+ }
+ this->setDataFromBytes(allocation);
+ }
+ }
+
+ /**
+ * Get the i^th element.
+ */
+ T& operator[] (int i) {
+ SkASSERT(i < this->size());
+ SkASSERT(i >= 0);
+ return fData[i];
+ }
+
+ const T& operator[] (int i) const {
+ SkASSERT(i < this->size());
+ SkASSERT(i >= 0);
+ return fData[i];
+ }
+
+ T& at(int i) { return (*this)[i]; }
+ const T& at(int i) const { return (*this)[i]; }
+
+ /**
+ * equivalent to operator[](0)
+ */
+ T& front() { SkASSERT(fSize > 0); return fData[0];}
+
+ const T& front() const { SkASSERT(fSize > 0); return fData[0];}
+
+ /**
+ * equivalent to operator[](size() - 1)
+ */
+    T& back() { SkASSERT(fSize > 0); return fData[fSize - 1];}
+
+ const T& back() const { SkASSERT(fSize > 0); return fData[fSize - 1];}
+
+ /**
+ * equivalent to operator[](size()-1-i)
+ */
+ T& fromBack(int i) {
+ SkASSERT(i >= 0);
+ SkASSERT(i < this->size());
+ return fData[fSize - i - 1];
+ }
+
+ const T& fromBack(int i) const {
+ SkASSERT(i >= 0);
+ SkASSERT(i < this->size());
+ return fData[fSize - i - 1];
+ }
+
+ bool operator==(const TArray<T, MEM_MOVE>& right) const {
+ int leftCount = this->size();
+ if (leftCount != right.size()) {
+ return false;
+ }
+ for (int index = 0; index < leftCount; ++index) {
+ if (fData[index] != right.fData[index]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const TArray<T, MEM_MOVE>& right) const {
+ return !(*this == right);
+ }
+
+ int capacity() const {
+ return fCapacity;
+ }
+
+protected:
+ // Creates an empty array that will use the passed storage block until it is insufficiently
+ // large to hold the entire array.
+ template <int InitialCapacity>
+ TArray(SkAlignedSTStorage<InitialCapacity, T>* storage, int size = 0) {
+ static_assert(InitialCapacity >= 0);
+ SkASSERT(size >= 0);
+ SkASSERT(storage->get() != nullptr);
+ if (size > InitialCapacity) {
+ this->initData(size);
+ } else {
+ this->setDataFromBytes(*storage);
+ fSize = size;
+
+ // setDataFromBytes always sets fOwnMemory to true, but we are actually using static
+ // storage here, which shouldn't ever be freed.
+ fOwnMemory = false;
+ }
+ }
+
+    // Copy a C array, using pre-allocated storage if InitialCapacity >= size. Otherwise,
+    // storage will only be used when the array shrinks to fit.
+ template <int InitialCapacity>
+ TArray(const T* array, int size, SkAlignedSTStorage<InitialCapacity, T>* storage)
+ : TArray{storage, size}
+ {
+ this->copy(array);
+ }
+
+private:
+ // Growth factors for checkRealloc.
+ static constexpr double kExactFit = 1.0;
+ static constexpr double kGrowing = 1.5;
+
+ static constexpr int kMinHeapAllocCount = 8;
+ static_assert(SkIsPow2(kMinHeapAllocCount), "min alloc count not power of two.");
+
+    // Note: on 32-bit machines kMaxCapacity will be <= SIZE_MAX. On 64-bit machines it will
+    // simply be INT_MAX as long as sizeof(T) < 2^32.
+ static constexpr int kMaxCapacity = SkToInt(std::min(SIZE_MAX / sizeof(T), (size_t)INT_MAX));
+
+ void setDataFromBytes(SkSpan<std::byte> allocation) {
+ T* data = TCast(allocation.data());
+        // We may have gotten more bytes back from the allocator than requested; pin to
+        // kMaxCapacity. It would seem like SkContainerAllocator should handle the divide, but
+        // that would require a full divide instruction. If done here, the size is known at
+        // compile time and the divide can usually be implemented as a right shift. The full
+        // divide takes ~50X longer than the shift.
+ size_t size = std::min(allocation.size() / sizeof(T), SkToSizeT(kMaxCapacity));
+ setData(SkSpan<T>(data, size));
+ }
+
+ void setData(SkSpan<T> array) {
+ fData = array.data();
+ fCapacity = SkToU32(array.size());
+ fOwnMemory = true;
+ }
+
+    // We disable Control-Flow Integrity sanitization (go/cfi) when casting item-array buffers.
+    // CFI flags this code as dangerous because we are casting `buffer` to a T* while the buffer's
+    // contents might still be uninitialized memory. When T has a vtable, this is especially risky
+    // because we could hypothetically access a virtual method on fData and jump to an
+    // unpredictable location in memory. Of course, TArray won't actually use fData in this
+    // way, and we don't want to construct a T before the user requests one. There's no real risk
+    // here, so disable CFI when doing these casts.
+#ifdef __clang__
+ SK_NO_SANITIZE("cfi")
+#elif defined(__GNUC__)
+ SK_ATTRIBUTE(no_sanitize_undefined)
+#endif
+ static T* TCast(void* buffer) {
+ return (T*)buffer;
+ }
+
+ size_t bytes(int n) const {
+ SkASSERT(n <= kMaxCapacity);
+ return SkToSizeT(n) * sizeof(T);
+ }
+
+ static SkSpan<std::byte> Allocate(int capacity, double growthFactor = 1.0) {
+ return SkContainerAllocator{sizeof(T), kMaxCapacity}.allocate(capacity, growthFactor);
+ }
+
+ void initData(int count) {
+ this->setDataFromBytes(Allocate(count));
+ fSize = count;
+ }
+
+ void destroyAll() {
+ if (!this->empty()) {
+ T* cursor = this->begin();
+ T* const end = this->end();
+ do {
+ cursor->~T();
+ cursor++;
+ } while (cursor < end);
+ }
+ }
+
+ /** In the following move and copy methods, 'dst' is assumed to be uninitialized raw storage.
+ * In the following move methods, 'src' is destroyed leaving behind uninitialized raw storage.
+ */
+ void copy(const T* src) {
+ if constexpr (std::is_trivially_copyable_v<T>) {
+ if (!this->empty() && src != nullptr) {
+ sk_careful_memcpy(fData, src, this->size_bytes());
+ }
+ } else {
+ for (int i = 0; i < this->size(); ++i) {
+ new (fData + i) T(src[i]);
+ }
+ }
+ }
+
+ void move(int dst, int src) {
+ if constexpr (MEM_MOVE) {
+ memcpy(static_cast<void*>(&fData[dst]),
+ static_cast<const void*>(&fData[src]),
+ sizeof(T));
+ } else {
+ new (&fData[dst]) T(std::move(fData[src]));
+ fData[src].~T();
+ }
+ }
+
+ void move(void* dst) {
+ if constexpr (MEM_MOVE) {
+ sk_careful_memcpy(dst, fData, this->bytes(fSize));
+ } else {
+ for (int i = 0; i < this->size(); ++i) {
+ new (static_cast<char*>(dst) + this->bytes(i)) T(std::move(fData[i]));
+ fData[i].~T();
+ }
+ }
+ }
+
+ // Helper function that makes space for n objects, adjusts the count, but does not initialize
+ // the new objects.
+ void* push_back_raw(int n) {
+ this->checkRealloc(n, kGrowing);
+ void* ptr = fData + fSize;
+ fSize += n;
+ return ptr;
+ }
+
+ void checkRealloc(int delta, double growthFactor) {
+        SkASSERT(delta >= 0);
+ SkASSERT(fSize >= 0);
+ SkASSERT(fCapacity >= 0);
+
+ // Return if there are enough remaining allocated elements to satisfy the request.
+ if (this->capacity() - fSize >= delta) {
+ return;
+ }
+
+        // Don't overflow fSize or the size_t used later in the memory allocation. Overflowing
+        // the allocation size really only applies on 32-bit machines; on 64-bit machines this
+        // check will probably never fire. Since kMaxCapacity is bounded above by INT_MAX,
+        // this also checks the bounds of fSize.
+ if (delta > kMaxCapacity - fSize) {
+ sk_report_container_overflow_and_die();
+ }
+ const int newCount = fSize + delta;
+
+ SkSpan<std::byte> allocation = Allocate(newCount, growthFactor);
+
+ this->move(TCast(allocation.data()));
+ if (fOwnMemory) {
+ sk_free(fData);
+ }
+ this->setDataFromBytes(allocation);
+ SkASSERT(this->capacity() >= newCount);
+ SkASSERT(fData != nullptr);
+ }
+
+ T* fData{nullptr};
+ int fSize{0};
+ uint32_t fOwnMemory : 1;
+ uint32_t fCapacity : 31;
+};
+
+template <typename T, bool M> static inline void swap(TArray<T, M>& a, TArray<T, M>& b) {
+ a.swap(b);
+}
+
+} // namespace skia_private
+
+/**
+ * Subclass of TArray that contains a preallocated memory block for the array.
+ */
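+// For example (illustrative): SkSTArray<4, SkScalar> keeps up to four scalars in its
+// preallocated block and only heap-allocates once a fifth element is pushed.
+//
+//     SkSTArray<4, SkScalar> pts;
+//     for (int i = 0; i < 4; ++i) { pts.push_back(SkScalar(i)); }  // uses the stack block
+//     pts.push_back(4.0f);                                         // spills to the heap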
+template <int N, typename T, bool MEM_MOVE = sk_is_trivially_relocatable_v<T>>
+class SkSTArray : private SkAlignedSTStorage<N,T>, public skia_private::TArray<T, MEM_MOVE> {
+private:
+ static_assert(N > 0);
+ using STORAGE = SkAlignedSTStorage<N,T>;
+ using INHERITED = skia_private::TArray<T, MEM_MOVE>;
+
+public:
+ SkSTArray()
+ : STORAGE{}, INHERITED(static_cast<STORAGE*>(this)) {}
+
+ SkSTArray(const T* array, int count)
+ : STORAGE{}, INHERITED(array, count, static_cast<STORAGE*>(this)) {}
+
+ SkSTArray(std::initializer_list<T> data) : SkSTArray(data.begin(), SkToInt(data.size())) {}
+
+ explicit SkSTArray(int reserveCount) : SkSTArray() {
+ this->reserve_back(reserveCount);
+ }
+
+ SkSTArray (const SkSTArray& that) : SkSTArray() { *this = that; }
+ explicit SkSTArray(const INHERITED& that) : SkSTArray() { *this = that; }
+ SkSTArray ( SkSTArray&& that) : SkSTArray() { *this = std::move(that); }
+ explicit SkSTArray( INHERITED&& that) : SkSTArray() { *this = std::move(that); }
+
+ SkSTArray& operator=(const SkSTArray& that) {
+ INHERITED::operator=(that);
+ return *this;
+ }
+ SkSTArray& operator=(const INHERITED& that) {
+ INHERITED::operator=(that);
+ return *this;
+ }
+
+ SkSTArray& operator=(SkSTArray&& that) {
+ INHERITED::operator=(std::move(that));
+ return *this;
+ }
+ SkSTArray& operator=(INHERITED&& that) {
+ INHERITED::operator=(std::move(that));
+ return *this;
+ }
+
+ // Force the use of TArray for data() and size().
+ using INHERITED::data;
+ using INHERITED::size;
+};
+
+// TODO: remove this typedef when all uses have been converted from SkTArray to TArray.
+template <typename T, bool MEM_MOVE = sk_is_trivially_relocatable_v<T>>
+using SkTArray = skia_private::TArray<T, MEM_MOVE>;
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkTDArray.h b/gfx/skia/skia/include/private/base/SkTDArray.h
new file mode 100644
index 0000000000..b08d285378
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTDArray.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTDArray_DEFINED
+#define SkTDArray_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkTo.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <initializer_list>
+#include <utility>
+
+class SK_SPI SkTDStorage {
+public:
+ explicit SkTDStorage(int sizeOfT);
+ SkTDStorage(const void* src, int size, int sizeOfT);
+
+ // Copy
+ SkTDStorage(const SkTDStorage& that);
+ SkTDStorage& operator= (const SkTDStorage& that);
+
+ // Move
+ SkTDStorage(SkTDStorage&& that);
+ SkTDStorage& operator= (SkTDStorage&& that);
+
+ ~SkTDStorage();
+
+ void reset();
+ void swap(SkTDStorage& that);
+
+ // Size routines
+ bool empty() const { return fSize == 0; }
+ void clear() { fSize = 0; }
+ int size() const { return fSize; }
+ void resize(int newSize);
+ size_t size_bytes() const { return this->bytes(fSize); }
+
+ // Capacity routines
+ int capacity() const { return fCapacity; }
+ void reserve(int newCapacity);
+ void shrink_to_fit();
+
+ void* data() { return fStorage; }
+ const void* data() const { return fStorage; }
+
+ // Deletion routines
+ void erase(int index, int count);
+ // Removes the entry at 'index' and replaces it with the last array element
+ void removeShuffle(int index);
+
+ // Insertion routines
+ void* prepend();
+
+ void append();
+ void append(int count);
+ void* append(const void* src, int count);
+
+ void* insert(int index);
+ void* insert(int index, int count, const void* src);
+
+ void pop_back() {
+ SkASSERT(fSize > 0);
+ fSize--;
+ }
+
+ friend bool operator==(const SkTDStorage& a, const SkTDStorage& b);
+ friend bool operator!=(const SkTDStorage& a, const SkTDStorage& b) {
+ return !(a == b);
+ }
+
+private:
+ size_t bytes(int n) const { return SkToSizeT(n * fSizeOfT); }
+ void* address(int n) { return fStorage + this->bytes(n); }
+
+ // Adds delta to fSize. Crash if outside [0, INT_MAX]
+ int calculateSizeOrDie(int delta);
+
+ // Move the tail of the array defined by the indexes tailStart and tailEnd to dstIndex. The
+ // elements at dstIndex are overwritten by the tail.
+ void moveTail(int dstIndex, int tailStart, int tailEnd);
+
+ // Copy src into the array at dstIndex.
+ void copySrc(int dstIndex, const void* src, int count);
+
+ const int fSizeOfT;
+ std::byte* fStorage{nullptr};
+    int fCapacity{0}; // size of the allocation in fStorage (#elements)
+ int fSize{0}; // logical number of elements (fSize <= fCapacity)
+};
+
+static inline void swap(SkTDStorage& a, SkTDStorage& b) {
+ a.swap(b);
+}
+
+// SkTDArray<T> implements a std::vector-like array for raw data-only objects that do not require
+// construction or destruction. The constructor and destructor for T will not be called; T objects
+// will always be moved via raw memcpy. Newly created T objects will contain uninitialized memory.
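+//
+// A usage sketch (illustrative; values hypothetical):
+//
+//     SkTDArray<int> ids;
+//     *ids.append() = 7;      // append one uninitialized slot, then write it
+//     ids.push_back(9);       // or copy a value in directly
+//     ids.removeShuffle(0);   // ids == {9}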
+template <typename T> class SkTDArray {
+public:
+ SkTDArray() : fStorage{sizeof(T)} {}
+ SkTDArray(const T src[], int count) : fStorage{src, count, sizeof(T)} { }
+    SkTDArray(const std::initializer_list<T>& list)
+            : SkTDArray(list.begin(), SkToInt(list.size())) {}
+
+ // Copy
+ SkTDArray(const SkTDArray<T>& src) : SkTDArray(src.data(), src.size()) {}
+ SkTDArray<T>& operator=(const SkTDArray<T>& src) {
+ fStorage = src.fStorage;
+ return *this;
+ }
+
+ // Move
+ SkTDArray(SkTDArray<T>&& src) : fStorage{std::move(src.fStorage)} {}
+ SkTDArray<T>& operator=(SkTDArray<T>&& src) {
+ fStorage = std::move(src.fStorage);
+ return *this;
+ }
+
+ friend bool operator==(const SkTDArray<T>& a, const SkTDArray<T>& b) {
+ return a.fStorage == b.fStorage;
+ }
+ friend bool operator!=(const SkTDArray<T>& a, const SkTDArray<T>& b) { return !(a == b); }
+
+ void swap(SkTDArray<T>& that) {
+ using std::swap;
+ swap(fStorage, that.fStorage);
+ }
+
+ bool empty() const { return fStorage.empty(); }
+
+ // Return the number of elements in the array
+ int size() const { return fStorage.size(); }
+
+ // Return the total number of elements allocated.
+ // Note: capacity() - size() gives you the number of elements you can add without causing an
+ // allocation.
+ int capacity() const { return fStorage.capacity(); }
+
+ // return the number of bytes in the array: count * sizeof(T)
+ size_t size_bytes() const { return fStorage.size_bytes(); }
+
+ T* data() { return static_cast<T*>(fStorage.data()); }
+ const T* data() const { return static_cast<const T*>(fStorage.data()); }
+ T* begin() { return this->data(); }
+ const T* begin() const { return this->data(); }
+ T* end() { return this->data() + this->size(); }
+ const T* end() const { return this->data() + this->size(); }
+
+ T& operator[](int index) {
+ SkASSERT(index < this->size());
+ return this->data()[index];
+ }
+ const T& operator[](int index) const {
+ SkASSERT(index < this->size());
+ return this->data()[index];
+ }
+
+ const T& back() const {
+ SkASSERT(this->size() > 0);
+ return this->data()[this->size() - 1];
+ }
+ T& back() {
+ SkASSERT(this->size() > 0);
+ return this->data()[this->size() - 1];
+ }
+
+ void reset() {
+ fStorage.reset();
+ }
+
+ void clear() {
+ fStorage.clear();
+ }
+
+ // Sets the number of elements in the array.
+ // If the array does not have space for count elements, it will increase
+ // the storage allocated to some amount greater than that required.
+ // It will never shrink the storage.
+ void resize(int count) {
+ fStorage.resize(count);
+ }
+
+ void reserve(int n) {
+ fStorage.reserve(n);
+ }
+
+ T* append() {
+ fStorage.append();
+ return this->end() - 1;
+ }
+ T* append(int count) {
+ fStorage.append(count);
+ return this->end() - count;
+ }
+ T* append(int count, const T* src) {
+ return static_cast<T*>(fStorage.append(src, count));
+ }
+
+ T* insert(int index) {
+ return static_cast<T*>(fStorage.insert(index));
+ }
+ T* insert(int index, int count, const T* src = nullptr) {
+ return static_cast<T*>(fStorage.insert(index, count, src));
+ }
+
+ void remove(int index, int count = 1) {
+ fStorage.erase(index, count);
+ }
+
+ void removeShuffle(int index) {
+ fStorage.removeShuffle(index);
+ }
+
+ // routines to treat the array like a stack
+ void push_back(const T& v) {
+ this->append();
+ this->back() = v;
+ }
+ void pop_back() { fStorage.pop_back(); }
+
+ void shrink_to_fit() {
+ fStorage.shrink_to_fit();
+ }
+
+private:
+ SkTDStorage fStorage;
+};
+
+template <typename T> static inline void swap(SkTDArray<T>& a, SkTDArray<T>& b) { a.swap(b); }
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkTFitsIn.h b/gfx/skia/skia/include/private/base/SkTFitsIn.h
new file mode 100644
index 0000000000..365748abef
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTFitsIn.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTFitsIn_DEFINED
+#define SkTFitsIn_DEFINED
+
+#include "include/private/base/SkDebug.h"
+
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+/**
+ * std::underlying_type is only defined for enums. For integral types, we just want the type.
+ */
+template <typename T, class Enable = void>
+struct sk_strip_enum {
+ typedef T type;
+};
+
+template <typename T>
+struct sk_strip_enum<T, typename std::enable_if<std::is_enum<T>::value>::type> {
+ typedef typename std::underlying_type<T>::type type;
+};
+
+
+/**
+ * In C++ an unsigned to signed cast where the source value cannot be represented in the destination
+ * type results in an implementation defined destination value. Unlike C, C++ does not allow a trap.
+ * This makes "(S)(D)s == s" a possibly useful test. However, there are two cases where this is
+ * incorrect:
+ *
+ * when testing if a value of a smaller signed type can be represented in a larger unsigned type
+ * (int8_t)(uint16_t)-1 == -1 => (int8_t)0xFFFF == -1 => [implementation defined] == -1
+ *
+ * when testing if a value of a larger unsigned type can be represented in a smaller signed type
+ * (uint16_t)(int8_t)0xFFFF == 0xFFFF => (uint16_t)-1 == 0xFFFF => 0xFFFF == 0xFFFF => true.
+ *
+ * Consider the cases:
+ * u = unsigned, less digits
+ * U = unsigned, more digits
+ * s = signed, less digits
+ * S = signed, more digits
+ * v is the value we're considering.
+ *
+ * u -> U: (u)(U)v == v, trivially true
+ * U -> u: (U)(u)v == v, both casts well defined, test works
+ * s -> S: (s)(S)v == v, trivially true
+ * S -> s: (S)(s)v == v, first cast implementation value, second cast defined, test works
+ * s -> U: (s)(U)v == v, *this is bad*, the second cast results in implementation defined value
+ * S -> u: (S)(u)v == v, the second cast is required to prevent promotion of rhs to unsigned
+ * u -> S: (u)(S)v == v, trivially true
+ * U -> s: (U)(s)v == v, *this is bad*,
+ * first cast results in implementation defined value,
+ * second cast is defined. However, this creates false positives
+ * uint16_t x = 0xFFFF
+ * (uint16_t)(int8_t)x == x
+ * => (uint16_t)-1 == x
+ * => 0xFFFF == x
+ * => true
+ *
+ * So for the eight cases three are trivially true, three more are valid casts, and two are special.
+ * The two 'full' checks which otherwise require two comparisons are valid cast checks.
+ * The two remaining checks s -> U [v >= 0] and U -> s [v <= max(s)] can be done with one op.
+ */
+
+template <typename D, typename S>
+static constexpr inline
+typename std::enable_if<(std::is_integral<S>::value || std::is_enum<S>::value) &&
+ (std::is_integral<D>::value || std::is_enum<D>::value), bool>::type
+/*bool*/ SkTFitsIn(S src) {
+    // Ensure that is_signed and is_unsigned are passed the arithmetic underlying types of enums.
+ using Sa = typename sk_strip_enum<S>::type;
+ using Da = typename sk_strip_enum<D>::type;
+
+ // SkTFitsIn() is used in public headers, so needs to be written targeting at most C++11.
+ return
+
+ // E.g. (int8_t)(uint8_t) int8_t(-1) == -1, but the uint8_t == 255, not -1.
+ (std::is_signed<Sa>::value && std::is_unsigned<Da>::value && sizeof(Sa) <= sizeof(Da)) ?
+ (S)0 <= src :
+
+ // E.g. (uint8_t)(int8_t) uint8_t(255) == 255, but the int8_t == -1.
+ (std::is_signed<Da>::value && std::is_unsigned<Sa>::value && sizeof(Da) <= sizeof(Sa)) ?
+ src <= (S)std::numeric_limits<Da>::max() :
+
+#if !defined(SK_DEBUG) && !defined(__MSVC_RUNTIME_CHECKS)
+ // Correct (simple) version. This trips up MSVC's /RTCc run-time checking.
+ (S)(D)src == src;
+#else
+ // More complex version that's safe with /RTCc. Used in all debug builds, for coverage.
+ (std::is_signed<Sa>::value) ?
+ (intmax_t)src >= (intmax_t)std::numeric_limits<Da>::min() &&
+ (intmax_t)src <= (intmax_t)std::numeric_limits<Da>::max() :
+
+ // std::is_unsigned<S> ?
+ (uintmax_t)src <= (uintmax_t)std::numeric_limits<Da>::max();
+#endif
+}
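+
+// A few concrete cases (illustrative), following the analysis above:
+//   SkTFitsIn<int8_t>(uint16_t{127})  -> true   (fits in [-128, 127])
+//   SkTFitsIn<int8_t>(uint16_t{255})  -> false  (255 > INT8_MAX)
+//   SkTFitsIn<uint16_t>(int8_t{-1})   -> false  (negative value; the s -> U case)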
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkTLogic.h b/gfx/skia/skia/include/private/base/SkTLogic.h
new file mode 100644
index 0000000000..26f363c946
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTLogic.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ *
+ * This header provides several Skia-specific type-trait additions in the
+ * sknonstd namespace.
+ */
+
+#ifndef SkTLogic_DEFINED
+#define SkTLogic_DEFINED
+
+#include <iterator>
+#include <type_traits>
+#include "include/private/base/SkTo.h"
+
+// The sknonstd namespace contains things we would like to be proposed and feel std-ish.
+namespace sknonstd {
+
+// The name 'copy' here is fraught with peril. In this case it means 'append', not 'overwrite'.
+// Alternate proposed names are 'propagate', 'augment', or 'append' (and 'add', but already taken).
+// std::experimental::propagate_const already exists for other purposes in TSv2.
+// These also follow the <dest, source> pattern used by boost.
+template <typename D, typename S> struct copy_const {
+ using type = std::conditional_t<std::is_const<S>::value, std::add_const_t<D>, D>;
+};
+template <typename D, typename S> using copy_const_t = typename copy_const<D, S>::type;
+
+template <typename D, typename S> struct copy_volatile {
+ using type = std::conditional_t<std::is_volatile<S>::value, std::add_volatile_t<D>, D>;
+};
+template <typename D, typename S> using copy_volatile_t = typename copy_volatile<D, S>::type;
+
+template <typename D, typename S> struct copy_cv {
+ using type = copy_volatile_t<copy_const_t<D, S>, S>;
+};
+template <typename D, typename S> using copy_cv_t = typename copy_cv<D, S>::type;
+
+// The name 'same' here means 'overwrite'.
+// Alternate proposed names are 'replace', 'transfer', or 'qualify_from'.
+// same_xxx<D, S> can be written as copy_xxx<remove_xxx_t<D>, S>
+template <typename D, typename S> using same_const = copy_const<std::remove_const_t<D>, S>;
+template <typename D, typename S> using same_const_t = typename same_const<D, S>::type;
+template <typename D, typename S> using same_volatile = copy_volatile<std::remove_volatile_t<D>, S>;
+template <typename D, typename S> using same_volatile_t = typename same_volatile<D, S>::type;
+template <typename D, typename S> using same_cv = copy_cv<std::remove_cv_t<D>, S>;
+template <typename D, typename S> using same_cv_t = typename same_cv<D, S>::type;
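+
+// For instance (illustrative): copy_const_t<int, const float> is 'const int', while
+// same_const_t<const int, float> is plain 'int', since 'same' overwrites qualifiers
+// rather than appending them.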
+
+} // namespace sknonstd
+
+template <typename Container>
+constexpr int SkCount(const Container& c) { return SkTo<int>(std::size(c)); }
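+
+// e.g. (illustrative) SkCount(std::array<int, 3>{}) == 3.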
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkTPin.h b/gfx/skia/skia/include/private/base/SkTPin.h
new file mode 100644
index 0000000000..c824c44640
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTPin.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTPin_DEFINED
+#define SkTPin_DEFINED
+
+#include <algorithm>
+
+/** @return x pinned (clamped) between lo and hi, inclusively.
+
+ Unlike std::clamp(), SkTPin() always returns a value between lo and hi.
+ If x is NaN, SkTPin() returns lo but std::clamp() returns NaN.
+*/
+template <typename T>
+static constexpr const T& SkTPin(const T& x, const T& lo, const T& hi) {
+ return std::max(lo, std::min(x, hi));
+}
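+
+// Illustrative: SkTPin(0.5f, 0.f, 1.f) == 0.5f, SkTPin(2.f, 0.f, 1.f) == 1.f, and
+// SkTPin(std::nanf(""), 0.f, 1.f) == 0.f (a NaN x pins to lo, per the note above).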
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkTemplates.h b/gfx/skia/skia/include/private/base/SkTemplates.h
new file mode 100644
index 0000000000..cbcf36c594
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTemplates.h
@@ -0,0 +1,426 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTemplates_DEFINED
+#define SkTemplates_DEFINED
+
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTLogic.h"
+
+#include <array>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+
+/** \file SkTemplates.h
+
+ This file contains light-weight template classes for type-safe and exception-safe
+ resource management.
+*/
+
+/**
+ * Marks a local variable as known to be unused (to avoid warnings).
+ * Note that this does *not* prevent the local variable from being optimized away.
+ */
+template<typename T> inline void sk_ignore_unused_variable(const T&) { }
+
+/**
+ * This is a general purpose absolute-value function.
+ * See SkAbs32 (in SkSafe32.h) for a 32-bit int-specific version that asserts.
+ */
+template <typename T> static inline T SkTAbs(T value) {
+ if (value < 0) {
+ value = -value;
+ }
+ return value;
+}
+
+/**
+ * Returns a pointer to a D which comes immediately after S[count].
+ */
+template <typename D, typename S> inline D* SkTAfter(S* ptr, size_t count = 1) {
+ return reinterpret_cast<D*>(ptr + count);
+}
+
+/**
+ * Returns a pointer to a D which comes byteOffset bytes after S.
+ */
+template <typename D, typename S> inline D* SkTAddOffset(S* ptr, ptrdiff_t byteOffset) {
+ // The intermediate char* has the same cv-ness as D as this produces better error messages.
+ // This relies on the fact that reinterpret_cast can add constness, but cannot remove it.
+ return reinterpret_cast<D*>(reinterpret_cast<sknonstd::same_cv_t<char, D>*>(ptr) + byteOffset);
+}
+
+template <typename T, T* P> struct SkOverloadedFunctionObject {
+ template <typename... Args>
+ auto operator()(Args&&... args) const -> decltype(P(std::forward<Args>(args)...)) {
+ return P(std::forward<Args>(args)...);
+ }
+};
+
+template <auto F> using SkFunctionObject =
+ SkOverloadedFunctionObject<std::remove_pointer_t<decltype(F)>, F>;
+
+/** \class SkAutoTCallVProc
+
+    Call a function when this goes out of scope. The template takes two
+    parameters: the object type, and a function that is to be called in the destructor.
+ If release() is called, the object reference is set to null. If the object
+ reference is null when the destructor is called, we do not call the
+ function.
+*/
+template <typename T, void (*P)(T*)> class SkAutoTCallVProc
+ : public std::unique_ptr<T, SkFunctionObject<P>> {
+ using inherited = std::unique_ptr<T, SkFunctionObject<P>>;
+public:
+ using inherited::inherited;
+ SkAutoTCallVProc(const SkAutoTCallVProc&) = delete;
+ SkAutoTCallVProc(SkAutoTCallVProc&& that) : inherited(std::move(that)) {}
+
+ operator T*() const { return this->get(); }
+};
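+
+// A usage sketch (illustrative; 'Thing', 'destroy_thing', and 'make_thing' are
+// hypothetical):
+//
+//     void destroy_thing(Thing*);
+//     SkAutoTCallVProc<Thing, destroy_thing> thing(make_thing());
+//     // destroy_thing(thing.get()) runs at end of scope unless release() is called.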
+
+
+namespace skia_private {
+/** Allocate an array of T elements, and free the array in the destructor
+ */
+template <typename T> class AutoTArray {
+public:
+ AutoTArray() {}
+ /** Allocate count number of T elements
+ */
+ explicit AutoTArray(int count) {
+ SkASSERT(count >= 0);
+ if (count) {
+ fArray.reset(new T[count]);
+ }
+ SkDEBUGCODE(fCount = count;)
+ }
+
+ AutoTArray(AutoTArray&& other) : fArray(std::move(other.fArray)) {
+ SkDEBUGCODE(fCount = other.fCount; other.fCount = 0;)
+ }
+ AutoTArray& operator=(AutoTArray&& other) {
+ if (this != &other) {
+ fArray = std::move(other.fArray);
+ SkDEBUGCODE(fCount = other.fCount; other.fCount = 0;)
+ }
+ return *this;
+ }
+
+ /** Reallocates given a new count. Reallocation occurs even if new count equals old count.
+ */
+ void reset(int count = 0) { *this = AutoTArray(count); }
+
+ /** Return the array of T elements. Will be NULL if count == 0
+ */
+ T* get() const { return fArray.get(); }
+
+ /** Return the nth element in the array
+ */
+ T& operator[](int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ return fArray[index];
+ }
+
+ /** Aliases matching other types, like std::vector. */
+ const T* data() const { return fArray.get(); }
+ T* data() { return fArray.get(); }
+
+private:
+ std::unique_ptr<T[]> fArray;
+ SkDEBUGCODE(int fCount = 0;)
+};
+
+/** Wraps AutoTArray, with room for kCountRequested elements preallocated.
+ */
+template <int kCountRequested, typename T> class AutoSTArray {
+public:
+ AutoSTArray(AutoSTArray&&) = delete;
+ AutoSTArray(const AutoSTArray&) = delete;
+ AutoSTArray& operator=(AutoSTArray&&) = delete;
+ AutoSTArray& operator=(const AutoSTArray&) = delete;
+
+ /** Initialize with no objects */
+ AutoSTArray() {
+ fArray = nullptr;
+ fCount = 0;
+ }
+
+ /** Allocate count number of T elements
+ */
+ AutoSTArray(int count) {
+ fArray = nullptr;
+ fCount = 0;
+ this->reset(count);
+ }
+
+ ~AutoSTArray() {
+ this->reset(0);
+ }
+
+ /** Destroys previous objects in the array and default constructs count number of objects */
+ void reset(int count) {
+ T* start = fArray;
+ T* iter = start + fCount;
+ while (iter > start) {
+ (--iter)->~T();
+ }
+
+ SkASSERT(count >= 0);
+ if (fCount != count) {
+ if (fCount > kCount) {
+ // 'fArray' was allocated last time so free it now
+ SkASSERT((T*) fStorage != fArray);
+ sk_free(fArray);
+ }
+
+ if (count > kCount) {
+ fArray = (T*) sk_malloc_throw(count, sizeof(T));
+ } else if (count > 0) {
+ fArray = (T*) fStorage;
+ } else {
+ fArray = nullptr;
+ }
+
+ fCount = count;
+ }
+
+ iter = fArray;
+ T* stop = fArray + count;
+ while (iter < stop) {
+ new (iter++) T;
+ }
+ }
+
+ /** Return the number of T elements in the array
+ */
+ int count() const { return fCount; }
+
+ /** Return the array of T elements. Will be NULL if count == 0
+ */
+ T* get() const { return fArray; }
+
+ T* begin() { return fArray; }
+
+ const T* begin() const { return fArray; }
+
+ T* end() { return fArray + fCount; }
+
+ const T* end() const { return fArray + fCount; }
+
+ /** Return the nth element in the array
+ */
+ T& operator[](int index) const {
+ SkASSERT(index < fCount);
+ return fArray[index];
+ }
+
+ /** Aliases matching other types, like std::vector. */
+ const T* data() const { return fArray; }
+ T* data() { return fArray; }
+ size_t size() const { return fCount; }
+
+private:
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ // Stack frame size is limited for SK_BUILD_FOR_GOOGLE3. 4k is less than the actual max,
+ // but some functions have multiple large stack allocations.
+ static const int kMaxBytes = 4 * 1024;
+ static const int kCount = kCountRequested * sizeof(T) > kMaxBytes
+ ? kMaxBytes / sizeof(T)
+ : kCountRequested;
+#else
+ static const int kCount = kCountRequested;
+#endif
+
+ int fCount;
+ T* fArray;
+ alignas(T) char fStorage[kCount * sizeof(T)];
+};
+
+/** Manages an array of T elements, freeing the array in the destructor.
+ * Does NOT call any constructors/destructors on T (T must be POD).
+ */
+template <typename T,
+ typename = std::enable_if_t<std::is_trivially_default_constructible<T>::value &&
+ std::is_trivially_destructible<T>::value>>
+class AutoTMalloc {
+public:
+ /** Takes ownership of the ptr. The ptr must be a value which can be passed to sk_free. */
+ explicit AutoTMalloc(T* ptr = nullptr) : fPtr(ptr) {}
+
+ /** Allocates space for 'count' Ts. */
+ explicit AutoTMalloc(size_t count)
+ : fPtr(count ? (T*)sk_malloc_throw(count, sizeof(T)) : nullptr) {}
+
+ AutoTMalloc(AutoTMalloc&&) = default;
+ AutoTMalloc& operator=(AutoTMalloc&&) = default;
+
+ /** Resize the memory area pointed to by the current ptr preserving contents. */
+ void realloc(size_t count) {
+ fPtr.reset(count ? (T*)sk_realloc_throw(fPtr.release(), count * sizeof(T)) : nullptr);
+ }
+
+ /** Resize the memory area pointed to by the current ptr without preserving contents. */
+ T* reset(size_t count = 0) {
+ fPtr.reset(count ? (T*)sk_malloc_throw(count, sizeof(T)) : nullptr);
+ return this->get();
+ }
+
+ T* get() const { return fPtr.get(); }
+
+ operator T*() { return fPtr.get(); }
+
+ operator const T*() const { return fPtr.get(); }
+
+ T& operator[](int index) { return fPtr.get()[index]; }
+
+ const T& operator[](int index) const { return fPtr.get()[index]; }
+
+ /** Aliases matching other types, like std::vector. */
+ const T* data() const { return fPtr.get(); }
+ T* data() { return fPtr.get(); }
+
+ /**
+ * Transfer ownership of the ptr to the caller, setting the internal
+ * pointer to NULL. Note that this differs from get(), which also returns
+ * the pointer, but it does not transfer ownership.
+ */
+ T* release() { return fPtr.release(); }
+
+private:
+ std::unique_ptr<T, SkOverloadedFunctionObject<void(void*), sk_free>> fPtr;
+};
+
+template <size_t kCountRequested,
+ typename T,
+ typename = std::enable_if_t<std::is_trivially_default_constructible<T>::value &&
+ std::is_trivially_destructible<T>::value>>
+class AutoSTMalloc {
+public:
+ AutoSTMalloc() : fPtr(fTStorage) {}
+
+ AutoSTMalloc(size_t count) {
+ if (count > kCount) {
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));
+ } else if (count) {
+ fPtr = fTStorage;
+ } else {
+ fPtr = nullptr;
+ }
+ }
+
+ AutoSTMalloc(AutoSTMalloc&&) = delete;
+ AutoSTMalloc(const AutoSTMalloc&) = delete;
+ AutoSTMalloc& operator=(AutoSTMalloc&&) = delete;
+ AutoSTMalloc& operator=(const AutoSTMalloc&) = delete;
+
+ ~AutoSTMalloc() {
+ if (fPtr != fTStorage) {
+ sk_free(fPtr);
+ }
+ }
+
+ // doesn't preserve contents
+ T* reset(size_t count) {
+ if (fPtr != fTStorage) {
+ sk_free(fPtr);
+ }
+ if (count > kCount) {
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));
+ } else if (count) {
+ fPtr = fTStorage;
+ } else {
+ fPtr = nullptr;
+ }
+ return fPtr;
+ }
+
+ T* get() const { return fPtr; }
+
+ operator T*() {
+ return fPtr;
+ }
+
+ operator const T*() const {
+ return fPtr;
+ }
+
+ T& operator[](int index) {
+ return fPtr[index];
+ }
+
+ const T& operator[](int index) const {
+ return fPtr[index];
+ }
+
+ /** Aliases matching other types, like std::vector. */
+ const T* data() const { return fPtr; }
+ T* data() { return fPtr; }
+
+    // Reallocs the array; can be used to shrink the allocation. Makes no attempt to be intelligent.
+ void realloc(size_t count) {
+ if (count > kCount) {
+ if (fPtr == fTStorage) {
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));
+ memcpy((void*)fPtr, fTStorage, kCount * sizeof(T));
+ } else {
+ fPtr = (T*)sk_realloc_throw(fPtr, count, sizeof(T));
+ }
+ } else if (count) {
+ if (fPtr != fTStorage) {
+ fPtr = (T*)sk_realloc_throw(fPtr, count, sizeof(T));
+ }
+ } else {
+ this->reset(0);
+ }
+ }
+
+private:
+ // Since we use uint32_t storage, we might be able to get more elements for free.
+ static const size_t kCountWithPadding = SkAlign4(kCountRequested*sizeof(T)) / sizeof(T);
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ // Stack frame size is limited for SK_BUILD_FOR_GOOGLE3. 4k is less than the actual max, but some functions
+ // have multiple large stack allocations.
+ static const size_t kMaxBytes = 4 * 1024;
+ static const size_t kCount = kCountRequested * sizeof(T) > kMaxBytes
+ ? kMaxBytes / sizeof(T)
+ : kCountWithPadding;
+#else
+ static const size_t kCount = kCountWithPadding;
+#endif
+
+ T* fPtr;
+ union {
+ uint32_t fStorage32[SkAlign4(kCount*sizeof(T)) >> 2];
+ T fTStorage[1]; // do NOT want to invoke T::T()
+ };
+};
+
+using UniqueVoidPtr = std::unique_ptr<void, SkOverloadedFunctionObject<void(void*), sk_free>>;
+
+} // namespace skia_private
+
+template<typename C, std::size_t... Is>
+constexpr auto SkMakeArrayFromIndexSequence(C c, std::index_sequence<Is...> is)
+-> std::array<decltype(c(std::declval<typename decltype(is)::value_type>())), sizeof...(Is)> {
+ return {{ c(Is)... }};
+}
+
+template<size_t N, typename C> constexpr auto SkMakeArray(C c)
+-> std::array<decltype(c(std::declval<typename std::index_sequence<N>::value_type>())), N> {
+ return SkMakeArrayFromIndexSequence(c, std::make_index_sequence<N>{});
+}
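+
+// e.g. (illustrative) SkMakeArray<3>([](size_t i) { return int(i * i); }) yields
+// std::array<int, 3>{{0, 1, 4}}.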
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkThreadAnnotations.h b/gfx/skia/skia/include/private/base/SkThreadAnnotations.h
new file mode 100644
index 0000000000..fc2a4aacee
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkThreadAnnotations.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkThreadAnnotations_DEFINED
+#define SkThreadAnnotations_DEFINED
+
+// The bulk of this code is cribbed from:
+// http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+
+#if defined(__clang__) && (!defined(SWIG))
+#define SK_THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x))
+#else
+#define SK_THREAD_ANNOTATION_ATTRIBUTE(x) // no-op
+#endif
+
+#define SK_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(capability(x))
+
+#define SK_SCOPED_CAPABILITY \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(scoped_lockable)
+
+#define SK_GUARDED_BY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x))
+
+#define SK_PT_GUARDED_BY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x))
+
+#define SK_ACQUIRED_BEFORE(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__))
+
+#define SK_ACQUIRED_AFTER(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__))
+
+#define SK_REQUIRES(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(requires_capability(__VA_ARGS__))
+
+#define SK_REQUIRES_SHARED(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(requires_shared_capability(__VA_ARGS__))
+
+#define SK_ACQUIRE(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquire_capability(__VA_ARGS__))
+
+#define SK_ACQUIRE_SHARED(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquire_shared_capability(__VA_ARGS__))
+
+// Would be SK_RELEASE, but that is already in use as SK_DEBUG vs. SK_RELEASE.
+#define SK_RELEASE_CAPABILITY(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(release_capability(__VA_ARGS__))
+
+// For symmetry with SK_RELEASE_CAPABILITY.
+#define SK_RELEASE_SHARED_CAPABILITY(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(release_shared_capability(__VA_ARGS__))
+
+#define SK_TRY_ACQUIRE(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(try_acquire_capability(__VA_ARGS__))
+
+#define SK_TRY_ACQUIRE_SHARED(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(try_acquire_shared_capability(__VA_ARGS__))
+
+#define SK_EXCLUDES(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(locks_excluded(__VA_ARGS__))
+
+#define SK_ASSERT_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(assert_capability(x))
+
+#define SK_ASSERT_SHARED_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(assert_shared_capability(x))
+
+#define SK_RETURN_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(lock_returned(x))
+
+#define SK_NO_THREAD_SAFETY_ANALYSIS \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis)
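+
+// A usage sketch (illustrative; assumes a mutex type annotated with SK_CAPABILITY,
+// e.g. SkMutex from SkMutex.h):
+//
+//     class Counter {
+//         SkMutex fMu;
+//         int fCount SK_GUARDED_BY(fMu) = 0;
+//     public:
+//         void inc() { fMu.acquire(); fCount += 1; fMu.release(); }
+//     };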
+
+#if defined(SK_BUILD_FOR_GOOGLE3) && !defined(SK_BUILD_FOR_WASM_IN_GOOGLE3)
+ extern "C" {
+ void __google_cxa_guard_acquire_begin(void);
+ void __google_cxa_guard_acquire_end (void);
+ }
+ #define SK_POTENTIALLY_BLOCKING_REGION_BEGIN __google_cxa_guard_acquire_begin()
+ #define SK_POTENTIALLY_BLOCKING_REGION_END __google_cxa_guard_acquire_end()
+#else
+ #define SK_POTENTIALLY_BLOCKING_REGION_BEGIN
+ #define SK_POTENTIALLY_BLOCKING_REGION_END
+#endif
+
+#endif // SkThreadAnnotations_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkThreadID.h b/gfx/skia/skia/include/private/base/SkThreadID.h
new file mode 100644
index 0000000000..18984884c9
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkThreadID.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkThreadID_DEFINED
+#define SkThreadID_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkDebug.h"
+
+#include <cstdint>
+
+typedef int64_t SkThreadID;
+
+// SkMutex.h uses SkGetThreadID in debug only code.
+SkDEBUGCODE(SK_SPI) SkThreadID SkGetThreadID();
+
+const SkThreadID kIllegalThreadID = 0;
+
+#endif // SkThreadID_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkTo.h b/gfx/skia/skia/include/private/base/SkTo.h
new file mode 100644
index 0000000000..51ccafeeaf
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkTo_DEFINED
+#define SkTo_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkTFitsIn.h"
+
+#include <cstddef>
+#include <cstdint>
+
+template <typename D, typename S> constexpr D SkTo(S s) {
+ return SkASSERT(SkTFitsIn<D>(s)),
+ static_cast<D>(s);
+}
+
+template <typename S> constexpr int8_t SkToS8(S x) { return SkTo<int8_t>(x); }
+template <typename S> constexpr uint8_t SkToU8(S x) { return SkTo<uint8_t>(x); }
+template <typename S> constexpr int16_t SkToS16(S x) { return SkTo<int16_t>(x); }
+template <typename S> constexpr uint16_t SkToU16(S x) { return SkTo<uint16_t>(x); }
+template <typename S> constexpr int32_t SkToS32(S x) { return SkTo<int32_t>(x); }
+template <typename S> constexpr uint32_t SkToU32(S x) { return SkTo<uint32_t>(x); }
+template <typename S> constexpr int64_t SkToS64(S x) { return SkTo<int64_t>(x); }
+template <typename S> constexpr uint64_t SkToU64(S x) { return SkTo<uint64_t>(x); }
+template <typename S> constexpr int SkToInt(S x) { return SkTo<int>(x); }
+template <typename S> constexpr unsigned SkToUInt(S x) { return SkTo<unsigned>(x); }
+template <typename S> constexpr size_t SkToSizeT(S x) { return SkTo<size_t>(x); }
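+
+// Illustrative: SkToU8(255) returns 255, while SkToU8(256) fails the SkTFitsIn
+// assert in debug builds (and would truncate in release builds).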
+
+/** @return false or true based on the condition
+*/
+template <typename T> static constexpr bool SkToBool(const T& x) {
+ return (bool)x;
+}
+
+#endif // SkTo_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkTypeTraits.h b/gfx/skia/skia/include/private/base/SkTypeTraits.h
new file mode 100644
index 0000000000..736f789776
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTypeTraits.h
@@ -0,0 +1,33 @@
+// Copyright 2022 Google LLC
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#ifndef SkTypeTraits_DEFINED
+#define SkTypeTraits_DEFINED
+
+#include <memory>
+#include <type_traits>
+
+// Trait for identifying types which are relocatable via memcpy, for container optimizations.
+template<typename, typename = void>
+struct sk_has_trivially_relocatable_member : std::false_type {};
+
+// Types can declare themselves trivially relocatable with a public
+// using sk_is_trivially_relocatable = std::true_type;
+template<typename T>
+struct sk_has_trivially_relocatable_member<T, std::void_t<typename T::sk_is_trivially_relocatable>>
+ : T::sk_is_trivially_relocatable {};
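+
+// For example (illustrative), a move-only type can opt in explicitly:
+//
+//     struct Box {
+//         std::unique_ptr<int> fPtr;
+//         using sk_is_trivially_relocatable = std::true_type;
+//     };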
+
+// By default, all trivially copyable types are trivially relocatable.
+template <typename T>
+struct sk_is_trivially_relocatable
+ : std::disjunction<std::is_trivially_copyable<T>, sk_has_trivially_relocatable_member<T>>{};
+
+// Here be some dragons: while technically not guaranteed, we count on all sane unique_ptr
+// implementations to be trivially relocatable.
+template <typename T>
+struct sk_is_trivially_relocatable<std::unique_ptr<T>> : std::true_type {};
+
+template <typename T>
+inline constexpr bool sk_is_trivially_relocatable_v = sk_is_trivially_relocatable<T>::value;
+
+#endif // SkTypeTraits_DEFINED
diff --git a/gfx/skia/skia/include/private/chromium/GrSlug.h b/gfx/skia/skia/include/private/chromium/GrSlug.h
new file mode 100644
index 0000000000..56841c5b99
--- /dev/null
+++ b/gfx/skia/skia/include/private/chromium/GrSlug.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSlug_DEFINED
+#define GrSlug_DEFINED
+
+#include "include/private/chromium/Slug.h"
+
+// TODO: Update Chrome to use sktext::gpu classes and remove these
+using GrSlug = sktext::gpu::Slug;
+
+#endif // GrSlug_DEFINED
diff --git a/gfx/skia/skia/include/private/chromium/GrVkSecondaryCBDrawContext.h b/gfx/skia/skia/include/private/chromium/GrVkSecondaryCBDrawContext.h
new file mode 100644
index 0000000000..51ed8a804d
--- /dev/null
+++ b/gfx/skia/skia/include/private/chromium/GrVkSecondaryCBDrawContext.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkSecondaryCBDrawContext_DEFINED
+#define GrVkSecondaryCBDrawContext_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTypes.h"
+
+#include <memory>
+
+class GrBackendSemaphore;
+class GrRecordingContext;
+struct GrVkDrawableInfo;
+namespace skgpu::ganesh {
+class Device;
+}
+class SkCanvas;
+class SkDeferredDisplayList;
+struct SkImageInfo;
+class SkSurfaceCharacterization;
+class SkSurfaceProps;
+
+/**
+ * This is a private header intended to be used only inside of Chromium. Chromium must burrow
+ * in and include it directly, since it is not part of Skia's public include directory.
+ */
+
+/**
+ * This class is used to draw into an external Vulkan secondary command buffer that is imported
+ * by the client. The secondary command buffer that gets imported must already have had begin called
+ * on it with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT. Thus any draws to the imported
+ * command buffer cannot require changing the render pass. This requirement means that certain types
+ * of draws will not be supported when using a GrVkSecondaryCBDrawContext. This includes:
+ * Draws that require a dst copy for blending will be dropped
+ * Text draws will be dropped (these may require intermediate uploads of text data)
+ * Read and Write pixels will not work
+ * Any other draw that requires a copy will fail (this includes using backdrop filter with save
+ * layer).
+ * Stenciling is also disabled, but that should not restrict any actual draws from working.
+ *
+ * While using a GrVkSecondaryCBDrawContext, the client can also draw into normal SkSurfaces and
+ * then draw those SkSurfaces (as SkImages) into the GrVkSecondaryCBDrawContext. If any of the
+ * previously mentioned unsupported draws are needed by the client, they can draw them into an
+ * offscreen surface, and then draw that into the GrVkSecondaryCBDrawContext.
+ *
+ * After all drawing to the GrVkSecondaryCBDrawContext has been done, the client must call flush()
+ * on the GrVkSecondaryCBDrawContext to actually fill in the secondary VkCommandBuffer with the
+ * draws.
+ *
+ * Additionally, the client must keep the GrVkSecondaryCBDrawContext alive until the secondary
+ * VkCommandBuffer has been submitted and all work finished on the GPU. Before deleting the
+ * GrVkSecondaryCBDrawContext, the client must call releaseResources() so that Skia can cleanup
+ * any internal objects that were created for the draws into the secondary command buffer.
+ */
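+
+// A sketch of the expected lifetime (illustrative; arguments elided):
+//
+//     auto drawCtx = GrVkSecondaryCBDrawContext::Make(rContext, info, vkInfo, &props);
+//     drawCtx->getCanvas()->drawRect(rect, paint);
+//     drawCtx->flush();             // fills in the secondary VkCommandBuffer
+//     // ...submit the secondary CB and wait for the GPU to finish...
+//     drawCtx->releaseResources();  // only safe after the GPU work completes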
+class SK_SPI GrVkSecondaryCBDrawContext : public SkRefCnt {
+public:
+ static sk_sp<GrVkSecondaryCBDrawContext> Make(GrRecordingContext*,
+ const SkImageInfo&,
+ const GrVkDrawableInfo&,
+ const SkSurfaceProps* props);
+
+ ~GrVkSecondaryCBDrawContext() override;
+
+ SkCanvas* getCanvas();
+
+    // Records all the draws to the imported secondary command buffer and sends any dependent
+    // offscreen draws to the GPU.
+ void flush();
+
+ /** Inserts a list of GPU semaphores that Skia will have the driver wait on before executing
+ commands for this secondary CB. The wait semaphores will get added to the VkCommandBuffer
+ owned by this GrContext when flush() is called, and not the command buffer which the
+ Secondary CB is from. This will guarantee that the driver waits on the semaphores before
+ the secondary command buffer gets executed. If this call returns false, then the GPU
+ back end will not wait on any passed in semaphores, and the client will still own the
+ semaphores, regardless of the value of deleteSemaphoresAfterWait.
+
+ If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
+ it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
+ knows that Skia has finished waiting on them. This can be done by using finishedProcs
+ on flush calls.
+
+ @param numSemaphores size of waitSemaphores array
+ @param waitSemaphores array of semaphore containers
+        @param deleteSemaphoresAfterWait who owns and should delete the semaphores
+ @return true if GPU is waiting on semaphores
+ */
+ bool wait(int numSemaphores,
+ const GrBackendSemaphore waitSemaphores[],
+ bool deleteSemaphoresAfterWait = true);
+
+ // This call will release all resources held by the draw context. The client must call
+ // releaseResources() before deleting the drawing context. However, the resources also include
+ // any Vulkan resources that were created and used for draws. Therefore the client must only
+ // call releaseResources() after submitting the secondary command buffer, and waiting for it to
+    // finish on the GPU. If it is called earlier, then some Vulkan objects may be deleted
+    // while they are still in use by the GPU.
+ void releaseResources();
+
+ const SkSurfaceProps& props() const { return fProps; }
+
+ // TODO: Fill out these calls to support DDL
+ bool characterize(SkSurfaceCharacterization* characterization) const;
+
+#ifndef SK_DDL_IS_UNIQUE_POINTER
+ bool draw(sk_sp<const SkDeferredDisplayList> deferredDisplayList);
+#else
+ bool draw(const SkDeferredDisplayList* deferredDisplayList);
+#endif
+
+ bool isCompatible(const SkSurfaceCharacterization& characterization) const;
+
+private:
+ explicit GrVkSecondaryCBDrawContext(sk_sp<skgpu::ganesh::Device>, const SkSurfaceProps*);
+
+ sk_sp<skgpu::ganesh::Device> fDevice;
+ std::unique_ptr<SkCanvas> fCachedCanvas;
+ const SkSurfaceProps fProps;
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/chromium/SkChromeRemoteGlyphCache.h b/gfx/skia/skia/include/private/chromium/SkChromeRemoteGlyphCache.h
new file mode 100644
index 0000000000..962d183b2d
--- /dev/null
+++ b/gfx/skia/skia/include/private/chromium/SkChromeRemoteGlyphCache.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkChromeRemoteGlyphCache_DEFINED
+#define SkChromeRemoteGlyphCache_DEFINED
+
+#include <memory>
+#include <vector>
+
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypeface.h"
+#include "include/utils/SkNoDrawCanvas.h"
+
+struct SkPackedGlyphID;
+class SkAutoDescriptor;
+class SkStrikeCache;
+class SkStrikeClientImpl;
+class SkStrikeServer;
+class SkStrikeServerImpl;
+namespace sktext::gpu { class Slug; }
+
+using SkDiscardableHandleId = uint32_t;
+// This class is not thread-safe.
+class SkStrikeServer {
+public:
+ // An interface used by the server to create handles for pinning SkStrike
+ // entries on the remote client.
+ class DiscardableHandleManager {
+ public:
+ SK_SPI virtual ~DiscardableHandleManager() = default;
+
+ // Creates a new *locked* handle and returns a unique ID that can be used to identify
+ // it on the remote client.
+ SK_SPI virtual SkDiscardableHandleId createHandle() = 0;
+
+ // Returns true if the handle could be successfully locked. The server can
+ // assume it will remain locked until the next set of serialized entries is
+ // pulled from the SkStrikeServer.
+ // If returns false, the cache entry mapped to the handle has been deleted
+ // on the client. Any subsequent attempts to lock the same handle are not
+ // allowed.
+ SK_SPI virtual bool lockHandle(SkDiscardableHandleId) = 0;
+
+ // Returns true if a handle has been deleted on the remote client. It is
+ // invalid to use a handle id again with this manager once this returns true.
+ SK_SPI virtual bool isHandleDeleted(SkDiscardableHandleId) = 0;
+ };
+
+ SK_SPI explicit SkStrikeServer(DiscardableHandleManager* discardableHandleManager);
+ SK_SPI ~SkStrikeServer();
+
+ // Create an analysis SkCanvas used to populate the SkStrikeServer with ops
+ // which will be serialized and rendered using the SkStrikeClient.
+ SK_API std::unique_ptr<SkCanvas> makeAnalysisCanvas(int width, int height,
+ const SkSurfaceProps& props,
+ sk_sp<SkColorSpace> colorSpace,
+ bool DFTSupport,
+ bool DFTPerspSupport = true);
+
+ // Serializes the strike data captured using a canvas returned by ::makeAnalysisCanvas. Any
+ // handles locked using the DiscardableHandleManager will be assumed to be
+ // unlocked after this call.
+ SK_SPI void writeStrikeData(std::vector<uint8_t>* memory);
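+
+    // The expected round trip (illustrative): draw into a canvas returned by
+    // makeAnalysisCanvas(), call writeStrikeData() to capture the glyph data, ship
+    // the bytes to the client process, and apply them there with
+    // SkStrikeClient::readStrikeData() before rasterizing the ops.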
+
+ // Testing helpers
+ void setMaxEntriesInDescriptorMapForTesting(size_t count);
+ size_t remoteStrikeMapSizeForTesting() const;
+
+private:
+ SkStrikeServerImpl* impl();
+
+ std::unique_ptr<SkStrikeServerImpl> fImpl;
+};
+
+class SkStrikeClient {
+public:
+ // This enum is used in histogram reporting in chromium. Please don't re-order the list of
+ // entries, and consider it to be append-only.
+ enum CacheMissType : uint32_t {
+ // Hard failures where no fallback could be found.
+ kFontMetrics = 0,
+ kGlyphMetrics = 1,
+ kGlyphImage = 2,
+ kGlyphPath = 3,
+
+ // (DEPRECATED) The original glyph could not be found and a fallback was used.
+ kGlyphMetricsFallback = 4,
+ kGlyphPathFallback = 5,
+
+ kGlyphDrawable = 6,
+ kLast = kGlyphDrawable
+ };
+
+ // An interface to delete handles that may be pinned by the remote server.
+ class DiscardableHandleManager : public SkRefCnt {
+ public:
+ ~DiscardableHandleManager() override = default;
+
+ // Returns true if the handle was unlocked and can be safely deleted. Once
+ // successful, subsequent attempts to delete the same handle are invalid.
+ virtual bool deleteHandle(SkDiscardableHandleId) = 0;
+
+ virtual void assertHandleValid(SkDiscardableHandleId) {}
+
+ virtual void notifyCacheMiss(CacheMissType type, int fontSize) = 0;
+
+ struct ReadFailureData {
+ size_t memorySize;
+ size_t bytesRead;
+ uint64_t typefaceSize;
+ uint64_t strikeCount;
+ uint64_t glyphImagesCount;
+ uint64_t glyphPathsCount;
+ };
+ virtual void notifyReadFailure(const ReadFailureData& data) {}
+ };
+
+ SK_SPI explicit SkStrikeClient(sk_sp<DiscardableHandleManager>,
+ bool isLogging = true,
+ SkStrikeCache* strikeCache = nullptr);
+ SK_SPI ~SkStrikeClient();
+
+ // Deserializes the strike data from a SkStrikeServer. All messages generated
+ // from a server when serializing the ops must be deserialized before the op
+ // is rasterized.
+ // Returns false if the data is invalid.
+ SK_SPI bool readStrikeData(const volatile void* memory, size_t memorySize);
+
+ // Given a descriptor re-write the Rec mapping the typefaceID from the renderer to the
+ // corresponding typefaceID on the GPU.
+ SK_SPI bool translateTypefaceID(SkAutoDescriptor* descriptor) const;
+
+ // Testing helpers
+ sk_sp<SkTypeface> retrieveTypefaceUsingServerIDForTest(SkTypefaceID) const;
+
+ // Given a buffer, unflatten into a slug making sure to do the typefaceID translation from
+ // renderer to GPU. Returns nullptr if there was a problem.
+ sk_sp<sktext::gpu::Slug> deserializeSlugForTest(const void* data, size_t size) const;
+
+private:
+ std::unique_ptr<SkStrikeClientImpl> fImpl;
+};
+#endif // SkChromeRemoteGlyphCache_DEFINED
diff --git a/gfx/skia/skia/include/private/chromium/SkDiscardableMemory.h b/gfx/skia/skia/include/private/chromium/SkDiscardableMemory.h
new file mode 100644
index 0000000000..ade4d71aa7
--- /dev/null
+++ b/gfx/skia/skia/include/private/chromium/SkDiscardableMemory.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDiscardableMemory_DEFINED
+#define SkDiscardableMemory_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+/**
+ * Interface for discardable memory. Implementation is provided by the
+ * embedder.
+ */
+class SK_SPI SkDiscardableMemory {
+public:
+ /**
+     * Factory method that creates, initializes, and locks an SkDiscardableMemory
+     * object. If any of these steps fails, nullptr will be returned.
+ */
+ static SkDiscardableMemory* Create(size_t bytes);
+
+    /**
+     *  Factory class that creates, initializes, and locks an SkDiscardableMemory
+     *  object. If any of these steps fails, nullptr is returned.
+     */
+ class Factory : public SkRefCnt {
+ public:
+ virtual SkDiscardableMemory* create(size_t bytes) = 0;
+ private:
+ using INHERITED = SkRefCnt;
+ };
+
+    /** Must not be called while locked. */
+ virtual ~SkDiscardableMemory() {}
+
+ /**
+     * Locks the memory, preventing it from being discarded. Once locked, you may
+     * obtain a pointer to that memory using the data() method.
+ *
+ * lock() may return false, indicating that the underlying memory was
+ * discarded and that the lock failed.
+ *
+ * Nested calls to lock are not allowed.
+ */
+ virtual bool SK_WARN_UNUSED_RESULT lock() = 0;
+
+ /**
+ * Returns the current pointer for the discardable memory. This call is ONLY
+ * valid when the discardable memory object is locked.
+ */
+ virtual void* data() = 0;
+
+ /**
+ * Unlock the memory so that it can be purged by the system. Must be called
+ * after every successful lock call.
+ */
+ virtual void unlock() = 0;
+
+protected:
+ SkDiscardableMemory() = default;
+ SkDiscardableMemory(const SkDiscardableMemory&) = delete;
+ SkDiscardableMemory& operator=(const SkDiscardableMemory&) = delete;
+};
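+
+// A minimal sketch of the lock/data/unlock protocol (assumes the embedder has
+// installed an implementation; error handling elided):
+//
+//   SkDiscardableMemory* dm = SkDiscardableMemory::Create(4096);  // returned locked
+//   if (dm) {
+//       memset(dm->data(), 0, 4096);  // data() is valid only while locked
+//       dm->unlock();                 // the system may now purge the contents
+//       if (dm->lock()) {             // must re-lock before the next use
+//           // ... read or write dm->data() ...
+//           dm->unlock();
+//       }  // lock() returning false means the contents were discarded
+//   }
+//   delete dm;  // never delete while locked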
+
+#endif
diff --git a/gfx/skia/skia/include/private/chromium/Slug.h b/gfx/skia/skia/include/private/chromium/Slug.h
new file mode 100644
index 0000000000..6775af0fc6
--- /dev/null
+++ b/gfx/skia/skia/include/private/chromium/Slug.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef sktext_gpu_Slug_DEFINED
+#define sktext_gpu_Slug_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+
+class SkCanvas;
+class SkMatrix;
+class SkPaint;
+class SkTextBlob;
+class SkReadBuffer;
+class SkStrikeClient;
+class SkWriteBuffer;
+
+namespace sktext::gpu {
+// Slug encapsulates an SkTextBlob at a specific origin, using a specific paint. It can be
+// manipulated using matrix and clip changes to the canvas. If the canvas is transformed, the
+// Slug transforms along with it, rendering smaller glyphs using bi-linear interpolation. You
+// can think of a Slug as making a rubber stamp out of a SkTextBlob.
+class SK_API Slug : public SkRefCnt {
+public:
+    // Returns nullptr if the blob would not draw. This is not because of clipping, but because of
+ // some paint optimization. The Slug is captured as if drawn using drawTextBlob.
+ static sk_sp<Slug> ConvertBlob(
+ SkCanvas* canvas, const SkTextBlob& blob, SkPoint origin, const SkPaint& paint);
+
+ // Serialize the slug.
+ sk_sp<SkData> serialize() const;
+ size_t serialize(void* buffer, size_t size) const;
+
+ // Set the client parameter to the appropriate SkStrikeClient when typeface ID translation
+ // is needed.
+ static sk_sp<Slug> Deserialize(
+ const void* data, size_t size, const SkStrikeClient* client = nullptr);
+ static sk_sp<Slug> MakeFromBuffer(SkReadBuffer& buffer);
+
+ // Draw the Slug obeying the canvas's mapping and clipping.
+ void draw(SkCanvas* canvas) const;
+
+ virtual SkRect sourceBounds() const = 0;
+    virtual SkRect sourceBoundsWithOrigin() const = 0;
+
+    // The paint passed into ConvertBlob. This paint is used instead of the paint resulting from
+    // the call to aboutToDraw because, when we call draw(), the initial paint is needed to call
+    // aboutToDraw again to get the layer right.
+ virtual const SkPaint& initialPaint() const = 0;
+
+ virtual void doFlatten(SkWriteBuffer&) const = 0;
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+private:
+ static uint32_t NextUniqueID();
+ const uint32_t fUniqueID{NextUniqueID()};
+};
+} // namespace sktext::gpu
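+
+// A minimal usage sketch (the canvas, blob, paint, and matrix names are assumptions):
+//
+//   sk_sp<sktext::gpu::Slug> slug = sktext::gpu::Slug::ConvertBlob(
+//           canvas, blob, {x, y}, paint);   // captured as if drawn with drawTextBlob
+//   if (slug) {
+//       canvas->concat(tileMatrix);         // the "rubber stamp" follows the CTM
+//       slug->draw(canvas);                 // obeys the canvas's mapping and clipping
+//   }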
+
+#endif // sktext_gpu_Slug_DEFINED
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrContext_Base.h b/gfx/skia/skia/include/private/gpu/ganesh/GrContext_Base.h
new file mode 100644
index 0000000000..ba7172e005
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrContext_Base.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContext_Base_DEFINED
+#define GrContext_Base_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/gpu/GrTypes.h"
+
+class GrBaseContextPriv;
+class GrCaps;
+class GrContextThreadSafeProxy;
+class GrDirectContext;
+class GrImageContext;
+class GrRecordingContext;
+enum class SkTextureCompressionType;
+
+class GrContext_Base : public SkRefCnt {
+public:
+ ~GrContext_Base() override;
+
+    /*
+     * Safely downcast to a GrDirectContext; returns nullptr if this is not a direct context.
+     */
+ virtual GrDirectContext* asDirectContext() { return nullptr; }
+
+ /*
+     * The 3D API backing this context.
+ */
+ SK_API GrBackendApi backend() const;
+
+ /*
+ * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
+     * It is guaranteed that this backend format will be the one used by the GrContext's
+     * SkColorType- and SkSurfaceCharacterization-based createBackendTexture methods.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ SK_API GrBackendFormat defaultBackendFormat(SkColorType, GrRenderable) const;
+
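+    // A minimal sketch of the validity check mentioned above (the context
+    // variable is an assumption):
+    //
+    //   GrBackendFormat fmt = ctx->defaultBackendFormat(kRGBA_8888_SkColorType,
+    //                                                   GrRenderable::kYes);
+    //   if (!fmt.isValid()) { /* renderable RGBA_8888 is unsupported here */ }
+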
+ SK_API GrBackendFormat compressedBackendFormat(SkTextureCompressionType) const;
+
+ /**
+ * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
+ * rendering is supported for the color type. 0 is returned if rendering to this color type
+ * is not supported at all.
+ */
+ SK_API int maxSurfaceSampleCountForColorType(SkColorType colorType) const;
+
+ // TODO: When the public version is gone, rename to refThreadSafeProxy and add raw ptr ver.
+ sk_sp<GrContextThreadSafeProxy> threadSafeProxy();
+
+ // Provides access to functions that aren't part of the public API.
+ GrBaseContextPriv priv();
+ const GrBaseContextPriv priv() const; // NOLINT(readability-const-return-type)
+
+protected:
+ friend class GrBaseContextPriv; // for hidden functions
+
+ GrContext_Base(sk_sp<GrContextThreadSafeProxy>);
+
+ virtual bool init();
+
+ /**
+ * An identifier for this context. The id is used by all compatible contexts. For example,
+ * if SkImages are created on one thread using an image creation context, then fed into a
+     * DDL Recorder on a second thread (which has a recording context) and finally replayed on
+ * a third thread with a direct context, then all three contexts will report the same id.
+ * It is an error for an image to be used with contexts that report different ids.
+ */
+ uint32_t contextID() const;
+
+ bool matches(GrContext_Base* candidate) const {
+ return candidate && candidate->contextID() == this->contextID();
+ }
+
+ /*
+ * The options in effect for this context
+ */
+ const GrContextOptions& options() const;
+
+ const GrCaps* caps() const;
+ sk_sp<const GrCaps> refCaps() const;
+
+ virtual GrImageContext* asImageContext() { return nullptr; }
+ virtual GrRecordingContext* asRecordingContext() { return nullptr; }
+
+ sk_sp<GrContextThreadSafeProxy> fThreadSafeProxy;
+
+private:
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrD3DTypesMinimal.h b/gfx/skia/skia/include/private/gpu/ganesh/GrD3DTypesMinimal.h
new file mode 100644
index 0000000000..26b7534476
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrD3DTypesMinimal.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrD3DTypesMinimal_DEFINED
+#define GrD3DTypesMinimal_DEFINED
+
+// Minimal definitions of Direct3D types, without including d3d12.h
+
+#include "include/core/SkRefCnt.h"
+
+#include <dxgiformat.h>
+
+#include "include/gpu/GrTypes.h"
+
+struct ID3D12Resource;
+class GrD3DResourceState;
+typedef int GrD3DResourceStateEnum;
+struct GrD3DSurfaceInfo;
+struct GrD3DTextureResourceInfo;
+struct GrD3DTextureResourceSpec;
+struct GrD3DFenceInfo;
+
+// This struct is used to store the actual information about the Direct3D backend image on
+// GrBackendTexture and GrBackendRenderTarget. When a client calls getD3DTextureInfo on a
+// GrBackendTexture/RenderTarget, we use the GrD3DBackendSurfaceInfo to create a snapshot
+// GrD3DTextureResourceInfo object. Internally, this uses a ref-counted GrD3DResourceState object
+// to track the current D3D12_RESOURCE_STATES, which can be shared with an internal
+// GrD3DTextureResource so that state updates can be seen by all users of the texture.
+struct GrD3DBackendSurfaceInfo {
+ GrD3DBackendSurfaceInfo(const GrD3DTextureResourceInfo& info, GrD3DResourceState* state);
+
+ void cleanup();
+
+ GrD3DBackendSurfaceInfo& operator=(const GrD3DBackendSurfaceInfo&) = delete;
+
+    // Assigns the passed in GrD3DBackendSurfaceInfo to this object. If isValid is true we will
+    // also attempt to unref the old fResourceState on this object.
+ void assign(const GrD3DBackendSurfaceInfo&, bool isValid);
+
+ void setResourceState(GrD3DResourceStateEnum state);
+
+ sk_sp<GrD3DResourceState> getGrD3DResourceState() const;
+
+ GrD3DTextureResourceInfo snapTextureResourceInfo() const;
+
+ bool isProtected() const;
+#if GR_TEST_UTILS
+ bool operator==(const GrD3DBackendSurfaceInfo& that) const;
+#endif
+
+private:
+ GrD3DTextureResourceInfo* fTextureResourceInfo;
+ GrD3DResourceState* fResourceState;
+};
+
+struct GrD3DTextureResourceSpecHolder {
+public:
+ GrD3DTextureResourceSpecHolder(const GrD3DSurfaceInfo&);
+
+ void cleanup();
+
+ GrD3DSurfaceInfo getSurfaceInfo(uint32_t sampleCount,
+ uint32_t levelCount,
+ skgpu::Protected isProtected) const;
+
+private:
+ GrD3DTextureResourceSpec* fSpec;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrDawnTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrDawnTypesPriv.h
new file mode 100644
index 0000000000..ffcdc0eaaf
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrDawnTypesPriv.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnTypesPriv_DEFINED
+#define GrDawnTypesPriv_DEFINED
+
+#include "include/gpu/dawn/GrDawnTypes.h"
+
+struct GrDawnTextureSpec {
+ GrDawnTextureSpec() {}
+ GrDawnTextureSpec(const GrDawnSurfaceInfo& info) : fFormat(info.fFormat) {}
+
+ wgpu::TextureFormat fFormat;
+};
+
+GrDawnSurfaceInfo GrDawnTextureSpecToSurfaceInfo(const GrDawnTextureSpec& dawnSpec,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ skgpu::Protected isProtected);
+
+#endif
+
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrGLTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrGLTypesPriv.h
new file mode 100644
index 0000000000..7db777487a
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrGLTypesPriv.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/gl/GrGLTypes.h"
+
+#ifndef GrGLTypesPriv_DEFINED
+#define GrGLTypesPriv_DEFINED
+
+static constexpr int kGrGLColorFormatCount = static_cast<int>(GrGLFormat::kLastColorFormat) + 1;
+
+class GrGLTextureParameters : public SkNVRefCnt<GrGLTextureParameters> {
+public:
+    // We currently consider texture parameters invalid on all textures after
+    // GrContext::resetContext(). We use this type to track whether instances of
+ // GrGLTextureParameters were updated before or after the most recent resetContext(). At 10
+ // resets / frame and 60fps a 64bit timestamp will overflow in about a billion years.
+ // TODO: Require clients to use GrBackendTexture::glTextureParametersModified() to invalidate
+ // texture parameters and get rid of timestamp checking.
+ using ResetTimestamp = uint64_t;
+
+ // This initializes the params to have an expired timestamp. They'll be considered invalid the
+ // first time the texture is used unless set() is called.
+ GrGLTextureParameters() = default;
+
+ // This is texture parameter state that is overridden when a non-zero sampler object is bound.
+ struct SamplerOverriddenState {
+ SamplerOverriddenState();
+ void invalidate();
+
+ GrGLenum fMinFilter;
+ GrGLenum fMagFilter;
+ GrGLenum fWrapS;
+ GrGLenum fWrapT;
+ GrGLfloat fMinLOD;
+ GrGLfloat fMaxLOD;
+ GrGLfloat fMaxAniso;
+ // We always want the border color to be transparent black, so no need to store 4 floats.
+        // Just track whether it's been invalidated and is no longer the default.
+ bool fBorderColorInvalid;
+ };
+
+ // Texture parameter state that is not overridden by a bound sampler object.
+ struct NonsamplerState {
+ NonsamplerState();
+ void invalidate();
+
+ GrGLint fBaseMipMapLevel;
+ GrGLint fMaxMipmapLevel;
+ bool fSwizzleIsRGBA;
+ };
+
+ void invalidate();
+
+ ResetTimestamp resetTimestamp() const { return fResetTimestamp; }
+ const SamplerOverriddenState& samplerOverriddenState() const { return fSamplerOverriddenState; }
+ const NonsamplerState& nonsamplerState() const { return fNonsamplerState; }
+
+ // SamplerOverriddenState is optional because we don't track it when we're using sampler
+ // objects.
+ void set(const SamplerOverriddenState* samplerState,
+ const NonsamplerState& nonsamplerState,
+ ResetTimestamp currTimestamp);
+
+private:
+ static constexpr ResetTimestamp kExpiredTimestamp = 0;
+
+ SamplerOverriddenState fSamplerOverriddenState;
+ NonsamplerState fNonsamplerState;
+ ResetTimestamp fResetTimestamp = kExpiredTimestamp;
+};
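+
+// A minimal sketch of keeping GL texture parameter state in sync (variable
+// names and the GL filter constant are assumptions):
+//
+//   GrGLTextureParameters::SamplerOverriddenState sampler;
+//   sampler.fMinFilter = GR_GL_LINEAR;
+//   GrGLTextureParameters::NonsamplerState nonsampler;
+//   params->set(&sampler, nonsampler, currentResetTimestamp);
+//   ...
+//   if (params->resetTimestamp() < currentResetTimestamp) {
+//       // state was recorded before the last resetContext(); reapply glTexParameter calls
+//   }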
+
+class GrGLBackendTextureInfo {
+public:
+ GrGLBackendTextureInfo(const GrGLTextureInfo& info, GrGLTextureParameters* params)
+ : fInfo(info), fParams(params) {}
+ GrGLBackendTextureInfo(const GrGLBackendTextureInfo&) = delete;
+ GrGLBackendTextureInfo& operator=(const GrGLBackendTextureInfo&) = delete;
+ const GrGLTextureInfo& info() const { return fInfo; }
+ GrGLTextureParameters* parameters() const { return fParams; }
+ sk_sp<GrGLTextureParameters> refParameters() const { return sk_ref_sp(fParams); }
+
+ void cleanup();
+ void assign(const GrGLBackendTextureInfo&, bool thisIsValid);
+
+private:
+ GrGLTextureInfo fInfo;
+ GrGLTextureParameters* fParams;
+};
+
+struct GrGLTextureSpec {
+ GrGLTextureSpec() : fTarget(0), fFormat(0) {}
+ GrGLTextureSpec(const GrGLSurfaceInfo& info) : fTarget(info.fTarget), fFormat(info.fFormat) {}
+
+ GrGLenum fTarget;
+ GrGLenum fFormat;
+};
+
+GrGLSurfaceInfo GrGLTextureSpecToSurfaceInfo(const GrGLTextureSpec& glSpec,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ skgpu::Protected isProtected);
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrImageContext.h b/gfx/skia/skia/include/private/gpu/ganesh/GrImageContext.h
new file mode 100644
index 0000000000..72fdd4433d
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrImageContext.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrImageContext_DEFINED
+#define GrImageContext_DEFINED
+
+#include "include/private/base/SingleOwner.h"
+#include "include/private/gpu/ganesh/GrContext_Base.h"
+
+class GrImageContextPriv;
+
+// This is now just a view on a GrContextThreadSafeProxy that SkImages can attempt to downcast
+// to a GrDirectContext as a backdoor to some operations. Once we remove the backdoors, this
+// goes away and SkImages will just hold ThreadSafeProxies.
+class GrImageContext : public GrContext_Base {
+public:
+ ~GrImageContext() override;
+
+ // Provides access to functions that aren't part of the public API.
+ GrImageContextPriv priv();
+ const GrImageContextPriv priv() const; // NOLINT(readability-const-return-type)
+
+protected:
+ friend class GrImageContextPriv; // for hidden functions
+
+ GrImageContext(sk_sp<GrContextThreadSafeProxy>);
+
+ SK_API virtual void abandonContext();
+ SK_API virtual bool abandoned();
+
+ /** This is only useful for debug purposes */
+ skgpu::SingleOwner* singleOwner() const { return &fSingleOwner; }
+
+ GrImageContext* asImageContext() override { return this; }
+
+private:
+    // When making promise images, we currently need a placeholder GrImageContext instance to
+    // give to the SkImage; it has no real power, being just a wrapper around the ThreadSafeProxy.
+ // TODO: De-power SkImage to ThreadSafeProxy or at least figure out a way to share one instance.
+ static sk_sp<GrImageContext> MakeForPromiseImage(sk_sp<GrContextThreadSafeProxy>);
+
+    // In debug builds we guard against improper thread handling.
+    // This guard is passed to the GrDrawingManager and, from there, to all the
+ // GrSurfaceDrawContexts. It is also passed to the GrResourceProvider and SkGpuDevice.
+ // TODO: Move this down to GrRecordingContext.
+ mutable skgpu::SingleOwner fSingleOwner;
+
+ using INHERITED = GrContext_Base;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrMockTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrMockTypesPriv.h
new file mode 100644
index 0000000000..59a608dcfc
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrMockTypesPriv.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockTypesPriv_DEFINED
+#define GrMockTypesPriv_DEFINED
+
+#include "include/core/SkTextureCompressionType.h"
+#include "include/gpu/mock/GrMockTypes.h"
+
+struct GrMockTextureSpec {
+ GrMockTextureSpec()
+ : fColorType(GrColorType::kUnknown)
+ , fCompressionType(SkTextureCompressionType::kNone) {}
+ GrMockTextureSpec(const GrMockSurfaceInfo& info)
+ : fColorType(info.fColorType)
+ , fCompressionType(info.fCompressionType) {}
+
+ GrColorType fColorType = GrColorType::kUnknown;
+ SkTextureCompressionType fCompressionType = SkTextureCompressionType::kNone;
+};
+
+GrMockSurfaceInfo GrMockTextureSpecToSurfaceInfo(const GrMockTextureSpec& mockSpec,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ GrProtected isProtected);
+
+#endif
+
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrMtlTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrMtlTypesPriv.h
new file mode 100644
index 0000000000..ef65848b5e
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrMtlTypesPriv.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlTypesPriv_DEFINED
+#define GrMtlTypesPriv_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/mtl/GrMtlTypes.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef __APPLE__
+
+#include <TargetConditionals.h>
+
+#if defined(SK_BUILD_FOR_MAC)
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 110000
+#define GR_METAL_SDK_VERSION 230
+#elif __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
+#define GR_METAL_SDK_VERSION 220
+#elif __MAC_OS_X_VERSION_MAX_ALLOWED >= 101400
+#define GR_METAL_SDK_VERSION 210
+#else
+#error Must use at least 10.14 SDK to build Metal backend for macOS
+#endif
+#else
+#if __IPHONE_OS_VERSION_MAX_ALLOWED >= 140000 || __TV_OS_VERSION_MAX_ALLOWED >= 140000
+#define GR_METAL_SDK_VERSION 230
+#elif __IPHONE_OS_VERSION_MAX_ALLOWED >= 130000 || __TV_OS_VERSION_MAX_ALLOWED >= 130000
+#define GR_METAL_SDK_VERSION 220
+#elif __IPHONE_OS_VERSION_MAX_ALLOWED >= 120000 || __TV_OS_VERSION_MAX_ALLOWED >= 120000
+#define GR_METAL_SDK_VERSION 210
+#else
+#error Must use at least 12.00 SDK to build Metal backend for iOS
+#endif
+#endif
+
+#if __has_feature(objc_arc) && __has_attribute(objc_externally_retained)
+#define GR_NORETAIN __attribute__((objc_externally_retained))
+#define GR_NORETAIN_BEGIN \
+ _Pragma("clang attribute push (__attribute__((objc_externally_retained)), apply_to=any(function,objc_method))")
+#define GR_NORETAIN_END _Pragma("clang attribute pop")
+#else
+#define GR_NORETAIN
+#define GR_NORETAIN_BEGIN
+#define GR_NORETAIN_END
+#endif
+
+struct GrMtlTextureSpec {
+ GrMtlTextureSpec()
+ : fFormat(0)
+ , fUsage(0)
+ , fStorageMode(0) {}
+ GrMtlTextureSpec(const GrMtlSurfaceInfo& info)
+ : fFormat(info.fFormat)
+ , fUsage(info.fUsage)
+ , fStorageMode(info.fStorageMode) {}
+
+ GrMTLPixelFormat fFormat;
+ GrMTLTextureUsage fUsage;
+ GrMTLStorageMode fStorageMode;
+};
+
+GrMtlSurfaceInfo GrMtlTextureSpecToSurfaceInfo(const GrMtlTextureSpec& mtlSpec,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ skgpu::Protected isProtected);
+
+#endif // __APPLE__
+
+#endif // GrMtlTypesPriv_DEFINED
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrTypesPriv.h
new file mode 100644
index 0000000000..fb8688de0d
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrTypesPriv.h
@@ -0,0 +1,1042 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTypesPriv_DEFINED
+#define GrTypesPriv_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTextureCompressionType.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkTypeTraits.h"
+
+#include <chrono>
+#include <functional>
+
+class GrBackendFormat;
+class GrCaps;
+class GrSurfaceProxy;
+
+#ifdef MOZ_SKIA
+#include "mozilla/TimeStamp.h"
+
+struct GrStdSteadyClock
+{
+ typedef mozilla::TimeStamp time_point;
+
+ static time_point now() {
+ return mozilla::TimeStamp::NowLoRes();
+ }
+};
+
+static inline GrStdSteadyClock::time_point
+operator-(GrStdSteadyClock::time_point t, std::chrono::milliseconds ms) {
+ return t - mozilla::TimeDuration::FromMilliseconds(ms.count());
+}
+
+#else
+
+// The old libstdc++ uses the draft name "monotonic_clock" rather than "steady_clock". This might
+// not actually be monotonic, depending on how libstdc++ was built. However, this is only currently
+// used for idle resource purging so it shouldn't cause a correctness problem.
+#if defined(__GLIBCXX__) && (__GLIBCXX__ < 20130000)
+using GrStdSteadyClock = std::chrono::monotonic_clock;
+#else
+using GrStdSteadyClock = std::chrono::steady_clock;
+#endif
+
+#endif
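+
+// A minimal sketch of how this clock is used for idle resource purging (the
+// 5-second budget is an assumption):
+//
+//   auto cutoff = GrStdSteadyClock::now() - std::chrono::milliseconds(5000);
+//   // resources whose last-use time_point is older than 'cutoff' may be purged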
+
+/**
+ * Divide, rounding up.
+ */
+static inline constexpr size_t GrSizeDivRoundUp(size_t x, size_t y) { return (x + (y - 1)) / y; }
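+
+// For example, GrSizeDivRoundUp(10, 4) == 3: ten bytes span three 4-byte blocks.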
+
+/**
+ * Geometric primitives used for drawing.
+ */
+enum class GrPrimitiveType : uint8_t {
+ kTriangles,
+ kTriangleStrip,
+ kPoints,
+ kLines, // 1 pix wide only
+ kLineStrip, // 1 pix wide only
+};
+static constexpr int kNumGrPrimitiveTypes = (int)GrPrimitiveType::kLineStrip + 1;
+
+static constexpr bool GrIsPrimTypeLines(GrPrimitiveType type) {
+ return GrPrimitiveType::kLines == type || GrPrimitiveType::kLineStrip == type;
+}
+
+enum class GrPrimitiveRestart : bool {
+ kNo = false,
+ kYes = true
+};
+
+/**
+ * Should a created surface be texturable?
+ */
+enum class GrTexturable : bool {
+ kNo = false,
+ kYes = true
+};
+
+// A DDL recorder has its own proxy provider and proxy cache. This enum indicates if
+// a given proxy provider is one of these special ones.
+enum class GrDDLProvider : bool {
+ kNo = false,
+ kYes = true
+};
+
+/** Ownership rules for external GPU resources imported into Skia. */
+enum GrWrapOwnership {
+ /** Skia will assume the client will keep the resource alive and Skia will not free it. */
+ kBorrow_GrWrapOwnership,
+
+ /** Skia will assume ownership of the resource and free it. */
+ kAdopt_GrWrapOwnership,
+};
+
+enum class GrWrapCacheable : bool {
+ /**
+ * The wrapped resource will be removed from the cache as soon as it becomes purgeable. It may
+ * still be assigned and found by a unique key, but the presence of the key will not be used to
+ * keep the resource alive when it has no references.
+ */
+ kNo = false,
+ /**
+ * The wrapped resource is allowed to remain in the GrResourceCache when it has no references
+ * but has a unique key. Such resources should only be given unique keys when it is known that
+ * the key will eventually be removed from the resource or invalidated via the message bus.
+ */
+ kYes = true
+};
+
+enum class GrBudgetedType : uint8_t {
+ /** The resource is budgeted and is subject to purging under budget pressure. */
+ kBudgeted,
+ /**
+ * The resource is unbudgeted and is purged as soon as it has no refs regardless of whether
+ * it has a unique or scratch key.
+ */
+ kUnbudgetedUncacheable,
+ /**
+ * The resource is unbudgeted and is allowed to remain in the cache with no refs if it
+ * has a unique key. Scratch keys are ignored.
+ */
+ kUnbudgetedCacheable,
+};
+
+enum class GrScissorTest : bool {
+ kDisabled = false,
+ kEnabled = true
+};
+
+/*
+ * Used to say whether a texture is backed by memory.
+ */
+enum class GrMemoryless : bool {
+ /**
+ * The texture will be allocated normally and will affect memory budgets.
+ */
+ kNo = false,
+ /**
+     * The texture will not use GPU memory and will not affect memory budgets.
+ */
+ kYes = true
+};
+
+struct GrMipLevel {
+ const void* fPixels = nullptr;
+ size_t fRowBytes = 0;
+ // This may be used to keep fPixels from being freed while a GrMipLevel exists.
+ sk_sp<SkData> fOptionalStorage;
+
+ static_assert(::sk_is_trivially_relocatable<decltype(fPixels)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(fOptionalStorage)>::value);
+
+ using sk_is_trivially_relocatable = std::true_type;
+};
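+
+// A minimal sketch of filling one level while keeping its pixels alive (the
+// width/height variables are assumptions):
+//
+//   sk_sp<SkData> storage = SkData::MakeUninitialized(w * h * 4);
+//   GrMipLevel level;
+//   level.fPixels = storage->data();
+//   level.fRowBytes = w * 4;
+//   level.fOptionalStorage = storage;  // keeps fPixels valid while 'level' exists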
+
+enum class GrSemaphoreWrapType {
+ kWillSignal,
+ kWillWait,
+};
+
+/**
+ * This enum is used to specify the load operation to be used when an OpsTask/GrOpsRenderPass
+ * begins execution.
+ */
+enum class GrLoadOp {
+ kLoad,
+ kClear,
+ kDiscard,
+};
+
+/**
+ * This enum is used to specify the store operation to be used when an OpsTask/GrOpsRenderPass
+ * ends execution.
+ */
+enum class GrStoreOp {
+ kStore,
+ kDiscard,
+};
+
+/**
+ * Used to control antialiasing in draw calls.
+ */
+enum class GrAA : bool {
+ kNo = false,
+ kYes = true
+};
+
+enum class GrFillRule : bool {
+ kNonzero,
+ kEvenOdd
+};
+
+inline GrFillRule GrFillRuleForPathFillType(SkPathFillType fillType) {
+ switch (fillType) {
+ case SkPathFillType::kWinding:
+ case SkPathFillType::kInverseWinding:
+ return GrFillRule::kNonzero;
+ case SkPathFillType::kEvenOdd:
+ case SkPathFillType::kInverseEvenOdd:
+ return GrFillRule::kEvenOdd;
+ }
+ SkUNREACHABLE;
+}
+
+inline GrFillRule GrFillRuleForSkPath(const SkPath& path) {
+ return GrFillRuleForPathFillType(path.getFillType());
+}
+
+/** This enum indicates the type of antialiasing to be performed. */
+enum class GrAAType : unsigned {
+ /** No antialiasing */
+ kNone,
+ /** Use fragment shader code to blend with a fractional pixel coverage. */
+ kCoverage,
+ /** Use normal MSAA. */
+ kMSAA,
+
+ kLast = kMSAA
+};
+static const int kGrAATypeCount = static_cast<int>(GrAAType::kLast) + 1;
+
+static constexpr bool GrAATypeIsHW(GrAAType type) {
+ switch (type) {
+ case GrAAType::kNone:
+ return false;
+ case GrAAType::kCoverage:
+ return false;
+ case GrAAType::kMSAA:
+ return true;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Some pixel configs are inherently clamped to [0,1], some are allowed to go outside that range,
+ * and some are FP but manually clamped in the XP.
+ */
+enum class GrClampType {
+ kAuto, // Normalized, fixed-point configs
+ kManual, // Clamped FP configs
+ kNone, // Normal (unclamped) FP configs
+};
+
+/**
+ * A number of rectangle/quadrilateral drawing APIs can control anti-aliasing on a per edge basis.
+ * These masks specify which edges are AA'ed. The intent for this is to support tiling with seamless
+ * boundaries, where the inner edges are non-AA and the outer edges are AA. Regular rectangle draws
+ * simply use kAll or kNone depending on if they want anti-aliasing or not.
+ *
+ * In APIs that support per-edge AA, GrQuadAAFlags is the only AA-control parameter that is
+ * provided (compared to the typical GrAA parameter). kNone is equivalent to GrAA::kNo, and any
+ * other set of edge flags would require GrAA::kYes (with rendering output dependent on how that
+ * maps to GrAAType for a given SurfaceDrawContext).
+ *
+ * These values are identical to SkCanvas::QuadAAFlags.
+ */
+enum class GrQuadAAFlags {
+ kLeft = 0b0001,
+ kTop = 0b0010,
+ kRight = 0b0100,
+ kBottom = 0b1000,
+
+ kNone = 0b0000,
+ kAll = 0b1111,
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrQuadAAFlags)
+
+static inline GrQuadAAFlags SkToGrQuadAAFlags(unsigned flags) {
+ return static_cast<GrQuadAAFlags>(flags);
+}
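+
+// A minimal sketch of per-edge AA for seamless tiling: only the outer edges of a
+// tile grid get AA (the tile-position flags are assumptions; the bitwise operators
+// come from GR_MAKE_BITFIELD_CLASS_OPS above):
+//
+//   GrQuadAAFlags edges = GrQuadAAFlags::kNone;
+//   if (isLeftmostTile) edges |= GrQuadAAFlags::kLeft;
+//   if (isTopmostTile)  edges |= GrQuadAAFlags::kTop;
+//   GrAA aa = (edges == GrQuadAAFlags::kNone) ? GrAA::kNo : GrAA::kYes;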
+
+/**
+ * The type of texture. Backends other than GL currently only use the 2D value but the type must
+ * still be known at the API-neutral layer as it is used to determine whether MIP maps, renderability,
+ * and sampling parameters are legal for proxies that will be instantiated with wrapped textures.
+ */
+enum class GrTextureType {
+ kNone,
+ k2D,
+ /* Rectangle uses unnormalized texture coordinates. */
+ kRectangle,
+ kExternal
+};
+
+enum GrShaderType {
+ kVertex_GrShaderType,
+ kFragment_GrShaderType,
+
+ kLastkFragment_GrShaderType = kFragment_GrShaderType
+};
+static const int kGrShaderTypeCount = kLastkFragment_GrShaderType + 1;
+
+enum GrShaderFlags {
+ kNone_GrShaderFlags = 0,
+ kVertex_GrShaderFlag = 1 << 0,
+ kFragment_GrShaderFlag = 1 << 1
+};
+SK_MAKE_BITFIELD_OPS(GrShaderFlags)
+
+/** Rectangle and external textures only support the clamp wrap mode and do not support
+ * MIP maps.
+ */
+static inline bool GrTextureTypeHasRestrictedSampling(GrTextureType type) {
+ switch (type) {
+ case GrTextureType::k2D:
+ return false;
+ case GrTextureType::kRectangle:
+ return true;
+ case GrTextureType::kExternal:
+ return true;
+ default:
+ SK_ABORT("Unexpected texture type");
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Types used to describe format of vertices in arrays.
+ */
+enum GrVertexAttribType {
+ kFloat_GrVertexAttribType = 0,
+ kFloat2_GrVertexAttribType,
+ kFloat3_GrVertexAttribType,
+ kFloat4_GrVertexAttribType,
+ kHalf_GrVertexAttribType,
+ kHalf2_GrVertexAttribType,
+ kHalf4_GrVertexAttribType,
+
+ kInt2_GrVertexAttribType, // vector of 2 32-bit ints
+ kInt3_GrVertexAttribType, // vector of 3 32-bit ints
+ kInt4_GrVertexAttribType, // vector of 4 32-bit ints
+
+ kByte_GrVertexAttribType, // signed byte
+ kByte2_GrVertexAttribType, // vector of 2 8-bit signed bytes
+ kByte4_GrVertexAttribType, // vector of 4 8-bit signed bytes
+ kUByte_GrVertexAttribType, // unsigned byte
+ kUByte2_GrVertexAttribType, // vector of 2 8-bit unsigned bytes
+ kUByte4_GrVertexAttribType, // vector of 4 8-bit unsigned bytes
+
+ kUByte_norm_GrVertexAttribType, // unsigned byte, e.g. coverage, 0 -> 0.0f, 255 -> 1.0f.
+ kUByte4_norm_GrVertexAttribType, // vector of 4 unsigned bytes, e.g. colors, 0 -> 0.0f,
+ // 255 -> 1.0f.
+
+ kShort2_GrVertexAttribType, // vector of 2 16-bit shorts.
+ kShort4_GrVertexAttribType, // vector of 4 16-bit shorts.
+
+ kUShort2_GrVertexAttribType, // vector of 2 unsigned shorts. 0 -> 0, 65535 -> 65535.
+ kUShort2_norm_GrVertexAttribType, // vector of 2 unsigned shorts. 0 -> 0.0f, 65535 -> 1.0f.
+
+ kInt_GrVertexAttribType,
+ kUInt_GrVertexAttribType,
+
+ kUShort_norm_GrVertexAttribType,
+
+ kUShort4_norm_GrVertexAttribType, // vector of 4 unsigned shorts. 0 -> 0.0f, 65535 -> 1.0f.
+
+ kLast_GrVertexAttribType = kUShort4_norm_GrVertexAttribType
+};
+static const int kGrVertexAttribTypeCount = kLast_GrVertexAttribType + 1;
+
+//////////////////////////////////////////////////////////////////////////////
+
+/**
+ * We have coverage effects that clip rendering to the edge of some geometric primitive.
+ * This enum specifies how that clipping is performed. Not all factories that take a
+ * GrClipEdgeType will succeed with all values and it is up to the caller to verify success.
+ */
+enum class GrClipEdgeType {
+ kFillBW,
+ kFillAA,
+ kInverseFillBW,
+ kInverseFillAA,
+
+ kLast = kInverseFillAA
+};
+static const int kGrClipEdgeTypeCnt = (int) GrClipEdgeType::kLast + 1;
+
+static constexpr bool GrClipEdgeTypeIsFill(const GrClipEdgeType edgeType) {
+ return (GrClipEdgeType::kFillAA == edgeType || GrClipEdgeType::kFillBW == edgeType);
+}
+
+static constexpr bool GrClipEdgeTypeIsInverseFill(const GrClipEdgeType edgeType) {
+ return (GrClipEdgeType::kInverseFillAA == edgeType ||
+ GrClipEdgeType::kInverseFillBW == edgeType);
+}
+
+static constexpr bool GrClipEdgeTypeIsAA(const GrClipEdgeType edgeType) {
+ return (GrClipEdgeType::kFillBW != edgeType &&
+ GrClipEdgeType::kInverseFillBW != edgeType);
+}
+
+static inline GrClipEdgeType GrInvertClipEdgeType(const GrClipEdgeType edgeType) {
+ switch (edgeType) {
+ case GrClipEdgeType::kFillBW:
+ return GrClipEdgeType::kInverseFillBW;
+ case GrClipEdgeType::kFillAA:
+ return GrClipEdgeType::kInverseFillAA;
+ case GrClipEdgeType::kInverseFillBW:
+ return GrClipEdgeType::kFillBW;
+ case GrClipEdgeType::kInverseFillAA:
+ return GrClipEdgeType::kFillAA;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Indicates the type of pending IO operations that can be recorded for gpu resources.
+ */
+enum GrIOType {
+ kRead_GrIOType,
+ kWrite_GrIOType,
+ kRW_GrIOType
+};
+
+/**
+ * Indicates the type of data that a GPU buffer will be used for.
+ */
+enum class GrGpuBufferType {
+ kVertex,
+ kIndex,
+ kDrawIndirect,
+ kXferCpuToGpu,
+ kXferGpuToCpu,
+ kUniform,
+};
+static const constexpr int kGrGpuBufferTypeCount = static_cast<int>(GrGpuBufferType::kUniform) + 1;
+
+/**
+ * Provides a performance hint regarding the frequency at which a data store will be accessed.
+ */
+enum GrAccessPattern {
+ /** Data store will be respecified repeatedly and used many times. */
+ kDynamic_GrAccessPattern,
+ /** Data store will be specified once and used many times. (Thus disqualified from caching.) */
+ kStatic_GrAccessPattern,
+ /** Data store will be specified once and used at most a few times. (Also can't be cached.) */
+ kStream_GrAccessPattern,
+
+ kLast_GrAccessPattern = kStream_GrAccessPattern
+};
+
+// Flags shared between the GrSurface & GrSurfaceProxy class hierarchies
+enum class GrInternalSurfaceFlags {
+ kNone = 0,
+
+ // Texture-level
+
+ // Means the pixels in the texture are read-only. Cannot also be a GrRenderTarget[Proxy].
+ kReadOnly = 1 << 0,
+
+ // RT-level
+
+ // This flag is for use with GL only. It tells us that the internal render target wraps FBO 0.
+ kGLRTFBOIDIs0 = 1 << 1,
+
+ // This means the render target is multisampled, and internally holds a non-msaa texture for
+ // resolving into. The render target resolves itself by blitting into this internal texture.
+ // (asTexture() might or might not return the internal texture, but if it does, we always
+ // resolve the render target before accessing this texture's data.)
+ kRequiresManualMSAAResolve = 1 << 2,
+
+ // This means the pixels in the render target are write-only. This is used for Dawn and Metal
+ // swap chain targets which can be rendered to, but not read or copied.
+ kFramebufferOnly = 1 << 3,
+
+ // This is a Vulkan only flag. If set the surface can be used as an input attachment in a
+ // shader. This is used for doing in shader blending where we want to sample from the same
+ // image we are drawing to.
+ kVkRTSupportsInputAttachment = 1 << 4,
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrInternalSurfaceFlags)
+
+// 'GR_MAKE_BITFIELD_CLASS_OPS' defines the & operator on GrInternalSurfaceFlags to return bool.
+// We want to find the bitwise & with these masks, so we declare them as ints.
+constexpr static int kGrInternalTextureFlagsMask = static_cast<int>(
+ GrInternalSurfaceFlags::kReadOnly);
+
+// We don't include kVkRTSupportsInputAttachment in this mask since we check it manually. We don't
+// require that both the surface and proxy have matching values for this flag. Instead we require
+// if the proxy has it set then the surface must also have it set. All other flags listed here must
+// match on the proxy and surface.
+// TODO: Add back kFramebufferOnly flag here once we update SkSurfaceCharacterization to take it
+// as a flag. skbug.com/10672
+constexpr static int kGrInternalRenderTargetFlagsMask = static_cast<int>(
+ GrInternalSurfaceFlags::kGLRTFBOIDIs0 |
+ GrInternalSurfaceFlags::kRequiresManualMSAAResolve/* |
+ GrInternalSurfaceFlags::kFramebufferOnly*/);
+
+constexpr static int kGrInternalTextureRenderTargetFlagsMask =
+ kGrInternalTextureFlagsMask | kGrInternalRenderTargetFlagsMask;
+
+#ifdef SK_DEBUG
+// Takes a pointer to a GrCaps, and will suppress prints if required
+#define GrCapsDebugf(caps, ...) if (!(caps)->suppressPrints()) SkDebugf(__VA_ARGS__)
+#else
+#define GrCapsDebugf(caps, ...) do {} while (0)
+#endif
+
+/**
+ * Specifies if the holder owns the backend, OpenGL or Vulkan, object.
+ */
+enum class GrBackendObjectOwnership : bool {
+ /** Holder does not destroy the backend object. */
+ kBorrowed = false,
+ /** Holder destroys the backend object. */
+ kOwned = true
+};
+
+/*
+ * Object for CPU-GPU synchronization
+ */
+typedef uint64_t GrFence;
+
+/**
+ * Used to include or exclude specific GPU path renderers for testing purposes.
+ */
+enum class GpuPathRenderers {
+ kNone = 0, // Always use software masks and/or DefaultPathRenderer.
+ kDashLine = 1 << 0,
+ kAtlas = 1 << 1,
+ kTessellation = 1 << 2,
+ kCoverageCounting = 1 << 3,
+ kAAHairline = 1 << 4,
+ kAAConvex = 1 << 5,
+ kAALinearizing = 1 << 6,
+ kSmall = 1 << 7,
+ kTriangulating = 1 << 8,
+ kDefault = ((1 << 9) - 1) // All path renderers.
+};
+
+/**
+ * Used to describe the current state of Mips on a GrTexture
+ */
+enum class GrMipmapStatus {
+ kNotAllocated, // Mips have not been allocated
+ kDirty, // Mips are allocated but the full mip tree does not have valid data
+ kValid, // All levels fully allocated and have valid data in them
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GpuPathRenderers)
+
+/**
+ * Like SkColorType this describes a layout of pixel data in CPU memory. It specifies the channels,
+ * their type, and width. This exists so that the GPU backend can have private types that have no
+ * analog in the public facing SkColorType enum and omit types not implemented in the GPU backend.
+ * It does not refer to a texture format and the mapping to texture formats may be many-to-many.
+ * It does not specify the sRGB encoding of the stored values. The components are listed in order of
+ * where they appear in memory. In other words the first component listed is in the low bits and
+ * the last component in the high bits.
+ */
+enum class GrColorType {
+ kUnknown,
+ kAlpha_8,
+ kBGR_565,
+ kABGR_4444, // This name differs from SkColorType. kARGB_4444_SkColorType is misnamed.
+ kRGBA_8888,
+ kRGBA_8888_SRGB,
+ kRGB_888x,
+ kRG_88,
+ kBGRA_8888,
+ kRGBA_1010102,
+ kBGRA_1010102,
+ kGray_8,
+ kGrayAlpha_88,
+ kAlpha_F16,
+ kRGBA_F16,
+ kRGBA_F16_Clamped,
+ kRGBA_F32,
+
+ kAlpha_16,
+ kRG_1616,
+ kRG_F16,
+ kRGBA_16161616,
+
+ // Unusual types that come up after reading back in cases where we are reassigning the meaning
+ // of a texture format's channels to use for a particular color format but have to read back the
+ // data to a full RGBA quadruple. (e.g. using a R8 texture format as A8 color type but the API
+ // only supports reading to RGBA8.) None of these have SkColorType equivalents.
+ kAlpha_8xxx,
+ kAlpha_F32xxx,
+ kGray_8xxx,
+ kR_8xxx,
+
+ // Types used to initialize backend textures.
+ kRGB_888,
+ kR_8,
+ kR_16,
+ kR_F16,
+ kGray_F16,
+ kBGRA_4444,
+ kARGB_4444,
+
+ kLast = kARGB_4444
+};
+
+static const int kGrColorTypeCnt = static_cast<int>(GrColorType::kLast) + 1;
+
+static constexpr SkColorType GrColorTypeToSkColorType(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return kUnknown_SkColorType;
+ case GrColorType::kAlpha_8: return kAlpha_8_SkColorType;
+ case GrColorType::kBGR_565: return kRGB_565_SkColorType;
+ case GrColorType::kABGR_4444: return kARGB_4444_SkColorType;
+ case GrColorType::kRGBA_8888: return kRGBA_8888_SkColorType;
+ case GrColorType::kRGBA_8888_SRGB: return kSRGBA_8888_SkColorType;
+ case GrColorType::kRGB_888x: return kRGB_888x_SkColorType;
+ case GrColorType::kRG_88: return kR8G8_unorm_SkColorType;
+ case GrColorType::kBGRA_8888: return kBGRA_8888_SkColorType;
+ case GrColorType::kRGBA_1010102: return kRGBA_1010102_SkColorType;
+ case GrColorType::kBGRA_1010102: return kBGRA_1010102_SkColorType;
+ case GrColorType::kGray_8: return kGray_8_SkColorType;
+ case GrColorType::kGrayAlpha_88: return kUnknown_SkColorType;
+ case GrColorType::kAlpha_F16: return kA16_float_SkColorType;
+ case GrColorType::kRGBA_F16: return kRGBA_F16_SkColorType;
+ case GrColorType::kRGBA_F16_Clamped: return kRGBA_F16Norm_SkColorType;
+ case GrColorType::kRGBA_F32: return kRGBA_F32_SkColorType;
+ case GrColorType::kAlpha_8xxx: return kUnknown_SkColorType;
+ case GrColorType::kAlpha_F32xxx: return kUnknown_SkColorType;
+ case GrColorType::kGray_8xxx: return kUnknown_SkColorType;
+ case GrColorType::kR_8xxx: return kUnknown_SkColorType;
+ case GrColorType::kAlpha_16: return kA16_unorm_SkColorType;
+ case GrColorType::kRG_1616: return kR16G16_unorm_SkColorType;
+ case GrColorType::kRGBA_16161616: return kR16G16B16A16_unorm_SkColorType;
+ case GrColorType::kRG_F16: return kR16G16_float_SkColorType;
+ case GrColorType::kRGB_888: return kUnknown_SkColorType;
+ case GrColorType::kR_8: return kR8_unorm_SkColorType;
+ case GrColorType::kR_16: return kUnknown_SkColorType;
+ case GrColorType::kR_F16: return kUnknown_SkColorType;
+ case GrColorType::kGray_F16: return kUnknown_SkColorType;
+ case GrColorType::kARGB_4444: return kUnknown_SkColorType;
+ case GrColorType::kBGRA_4444: return kUnknown_SkColorType;
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr GrColorType SkColorTypeToGrColorType(SkColorType ct) {
+ switch (ct) {
+ case kUnknown_SkColorType: return GrColorType::kUnknown;
+ case kAlpha_8_SkColorType: return GrColorType::kAlpha_8;
+ case kRGB_565_SkColorType: return GrColorType::kBGR_565;
+ case kARGB_4444_SkColorType: return GrColorType::kABGR_4444;
+ case kRGBA_8888_SkColorType: return GrColorType::kRGBA_8888;
+ case kSRGBA_8888_SkColorType: return GrColorType::kRGBA_8888_SRGB;
+ case kRGB_888x_SkColorType: return GrColorType::kRGB_888x;
+ case kBGRA_8888_SkColorType: return GrColorType::kBGRA_8888;
+ case kGray_8_SkColorType: return GrColorType::kGray_8;
+ case kRGBA_F16Norm_SkColorType: return GrColorType::kRGBA_F16_Clamped;
+ case kRGBA_F16_SkColorType: return GrColorType::kRGBA_F16;
+ case kRGBA_1010102_SkColorType: return GrColorType::kRGBA_1010102;
+ case kRGB_101010x_SkColorType: return GrColorType::kUnknown;
+ case kBGRA_1010102_SkColorType: return GrColorType::kBGRA_1010102;
+ case kBGR_101010x_SkColorType: return GrColorType::kUnknown;
+ case kBGR_101010x_XR_SkColorType: return GrColorType::kUnknown;
+ case kRGBA_F32_SkColorType: return GrColorType::kRGBA_F32;
+ case kR8G8_unorm_SkColorType: return GrColorType::kRG_88;
+ case kA16_unorm_SkColorType: return GrColorType::kAlpha_16;
+ case kR16G16_unorm_SkColorType: return GrColorType::kRG_1616;
+ case kA16_float_SkColorType: return GrColorType::kAlpha_F16;
+ case kR16G16_float_SkColorType: return GrColorType::kRG_F16;
+ case kR16G16B16A16_unorm_SkColorType: return GrColorType::kRGBA_16161616;
+ case kR8_unorm_SkColorType: return GrColorType::kR_8;
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr uint32_t GrColorTypeChannelFlags(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return 0;
+ case GrColorType::kAlpha_8: return kAlpha_SkColorChannelFlag;
+ case GrColorType::kBGR_565: return kRGB_SkColorChannelFlags;
+ case GrColorType::kABGR_4444: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGBA_8888: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGBA_8888_SRGB: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGB_888x: return kRGB_SkColorChannelFlags;
+ case GrColorType::kRG_88: return kRG_SkColorChannelFlags;
+ case GrColorType::kBGRA_8888: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGBA_1010102: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kBGRA_1010102: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kGray_8: return kGray_SkColorChannelFlag;
+ case GrColorType::kGrayAlpha_88: return kGrayAlpha_SkColorChannelFlags;
+ case GrColorType::kAlpha_F16: return kAlpha_SkColorChannelFlag;
+ case GrColorType::kRGBA_F16: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGBA_F16_Clamped: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGBA_F32: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kAlpha_8xxx: return kAlpha_SkColorChannelFlag;
+ case GrColorType::kAlpha_F32xxx: return kAlpha_SkColorChannelFlag;
+ case GrColorType::kGray_8xxx: return kGray_SkColorChannelFlag;
+ case GrColorType::kR_8xxx: return kRed_SkColorChannelFlag;
+ case GrColorType::kAlpha_16: return kAlpha_SkColorChannelFlag;
+ case GrColorType::kRG_1616: return kRG_SkColorChannelFlags;
+ case GrColorType::kRGBA_16161616: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRG_F16: return kRG_SkColorChannelFlags;
+ case GrColorType::kRGB_888: return kRGB_SkColorChannelFlags;
+ case GrColorType::kR_8: return kRed_SkColorChannelFlag;
+ case GrColorType::kR_16: return kRed_SkColorChannelFlag;
+ case GrColorType::kR_F16: return kRed_SkColorChannelFlag;
+ case GrColorType::kGray_F16: return kGray_SkColorChannelFlag;
+ case GrColorType::kARGB_4444: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kBGRA_4444: return kRGBA_SkColorChannelFlags;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Describes the encoding of channel data in a GrColorType.
+ */
+enum class GrColorTypeEncoding {
+ kUnorm,
+ kSRGBUnorm,
+ // kSnorm,
+ kFloat,
+ // kSint
+ // kUint
+};
+
+/**
+ * Describes a GrColorType by how many bits are used for each color component and how they are
+ * encoded. Currently all the non-zero channels share a single GrColorTypeEncoding. This could be
+ * expanded to store separate encodings and to indicate which bits belong to which components.
+ */
+class GrColorFormatDesc {
+public:
+ static constexpr GrColorFormatDesc MakeRGBA(int rgba, GrColorTypeEncoding e) {
+ return {rgba, rgba, rgba, rgba, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeRGBA(int rgb, int a, GrColorTypeEncoding e) {
+ return {rgb, rgb, rgb, a, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeRGB(int rgb, GrColorTypeEncoding e) {
+ return {rgb, rgb, rgb, 0, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeRGB(int r, int g, int b, GrColorTypeEncoding e) {
+ return {r, g, b, 0, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeAlpha(int a, GrColorTypeEncoding e) {
+ return {0, 0, 0, a, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeR(int r, GrColorTypeEncoding e) {
+ return {r, 0, 0, 0, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeRG(int rg, GrColorTypeEncoding e) {
+ return {rg, rg, 0, 0, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeGray(int grayBits, GrColorTypeEncoding e) {
+ return {0, 0, 0, 0, grayBits, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeGrayAlpha(int grayAlpha, GrColorTypeEncoding e) {
+ return {0, 0, 0, 0, grayAlpha, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeInvalid() { return {}; }
+
+ constexpr int r() const { return fRBits; }
+ constexpr int g() const { return fGBits; }
+ constexpr int b() const { return fBBits; }
+ constexpr int a() const { return fABits; }
+ constexpr int operator[](int c) const {
+ switch (c) {
+ case 0: return this->r();
+ case 1: return this->g();
+ case 2: return this->b();
+ case 3: return this->a();
+ }
+ SkUNREACHABLE;
+ }
+
+ constexpr int gray() const { return fGrayBits; }
+
+ constexpr GrColorTypeEncoding encoding() const { return fEncoding; }
+
+private:
+ int fRBits = 0;
+ int fGBits = 0;
+ int fBBits = 0;
+ int fABits = 0;
+ int fGrayBits = 0;
+ GrColorTypeEncoding fEncoding = GrColorTypeEncoding::kUnorm;
+
+ constexpr GrColorFormatDesc() = default;
+
+ constexpr GrColorFormatDesc(int r, int g, int b, int a, int gray, GrColorTypeEncoding encoding)
+ : fRBits(r), fGBits(g), fBBits(b), fABits(a), fGrayBits(gray), fEncoding(encoding) {
+ SkASSERT(r >= 0 && g >= 0 && b >= 0 && a >= 0 && gray >= 0);
+ SkASSERT(!gray || (!r && !g && !b));
+ SkASSERT(r || g || b || a || gray);
+ }
+};
+
+static constexpr GrColorFormatDesc GrGetColorTypeDesc(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown:
+ return GrColorFormatDesc::MakeInvalid();
+ case GrColorType::kAlpha_8:
+ return GrColorFormatDesc::MakeAlpha(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kBGR_565:
+ return GrColorFormatDesc::MakeRGB(5, 6, 5, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kABGR_4444:
+ return GrColorFormatDesc::MakeRGBA(4, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_8888:
+ return GrColorFormatDesc::MakeRGBA(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_8888_SRGB:
+ return GrColorFormatDesc::MakeRGBA(8, GrColorTypeEncoding::kSRGBUnorm);
+ case GrColorType::kRGB_888x:
+ return GrColorFormatDesc::MakeRGB(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRG_88:
+ return GrColorFormatDesc::MakeRG(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kBGRA_8888:
+ return GrColorFormatDesc::MakeRGBA(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_1010102:
+ return GrColorFormatDesc::MakeRGBA(10, 2, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kBGRA_1010102:
+ return GrColorFormatDesc::MakeRGBA(10, 2, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kGray_8:
+ return GrColorFormatDesc::MakeGray(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kGrayAlpha_88:
+ return GrColorFormatDesc::MakeGrayAlpha(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kAlpha_F16:
+ return GrColorFormatDesc::MakeAlpha(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kRGBA_F16:
+ return GrColorFormatDesc::MakeRGBA(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kRGBA_F16_Clamped:
+ return GrColorFormatDesc::MakeRGBA(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kRGBA_F32:
+ return GrColorFormatDesc::MakeRGBA(32, GrColorTypeEncoding::kFloat);
+ case GrColorType::kAlpha_8xxx:
+ return GrColorFormatDesc::MakeAlpha(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kAlpha_F32xxx:
+ return GrColorFormatDesc::MakeAlpha(32, GrColorTypeEncoding::kFloat);
+ case GrColorType::kGray_8xxx:
+ return GrColorFormatDesc::MakeGray(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kR_8xxx:
+ return GrColorFormatDesc::MakeR(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kAlpha_16:
+ return GrColorFormatDesc::MakeAlpha(16, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRG_1616:
+ return GrColorFormatDesc::MakeRG(16, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_16161616:
+ return GrColorFormatDesc::MakeRGBA(16, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRG_F16:
+ return GrColorFormatDesc::MakeRG(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kRGB_888:
+ return GrColorFormatDesc::MakeRGB(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kR_8:
+ return GrColorFormatDesc::MakeR(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kR_16:
+ return GrColorFormatDesc::MakeR(16, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kR_F16:
+ return GrColorFormatDesc::MakeR(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kGray_F16:
+ return GrColorFormatDesc::MakeGray(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kARGB_4444:
+ return GrColorFormatDesc::MakeRGBA(4, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kBGRA_4444:
+ return GrColorFormatDesc::MakeRGBA(4, GrColorTypeEncoding::kUnorm);
+ }
+ SkUNREACHABLE;
+}
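+
+// A minimal compile-time sanity-check sketch (everything above is constexpr):
+//
+//   constexpr GrColorFormatDesc desc = GrGetColorTypeDesc(GrColorType::kRGBA_1010102);
+//   static_assert(desc.r() == 10 && desc.g() == 10 && desc.b() == 10 && desc.a() == 2);
+//   static_assert(desc.encoding() == GrColorTypeEncoding::kUnorm);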
+
+static constexpr GrClampType GrColorTypeClampType(GrColorType colorType) {
+ if (GrGetColorTypeDesc(colorType).encoding() == GrColorTypeEncoding::kUnorm ||
+ GrGetColorTypeDesc(colorType).encoding() == GrColorTypeEncoding::kSRGBUnorm) {
+ return GrClampType::kAuto;
+ }
+ return GrColorType::kRGBA_F16_Clamped == colorType ? GrClampType::kManual : GrClampType::kNone;
+}
+
+// Consider a color type "wider" than n if it has more than n bits for any of its representable
+// channels.
+static constexpr bool GrColorTypeIsWiderThan(GrColorType colorType, int n) {
+ SkASSERT(n > 0);
+ auto desc = GrGetColorTypeDesc(colorType);
+    return (desc.r() && desc.r() > n) ||
+ (desc.g() && desc.g() > n) ||
+ (desc.b() && desc.b() > n) ||
+ (desc.a() && desc.a() > n) ||
+ (desc.gray() && desc.gray() > n);
+}
+
+static constexpr bool GrColorTypeIsAlphaOnly(GrColorType ct) {
+ return GrColorTypeChannelFlags(ct) == kAlpha_SkColorChannelFlag;
+}
+
+static constexpr bool GrColorTypeHasAlpha(GrColorType ct) {
+ return GrColorTypeChannelFlags(ct) & kAlpha_SkColorChannelFlag;
+}
+
+static constexpr size_t GrColorTypeBytesPerPixel(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return 0;
+ case GrColorType::kAlpha_8: return 1;
+ case GrColorType::kBGR_565: return 2;
+ case GrColorType::kABGR_4444: return 2;
+ case GrColorType::kRGBA_8888: return 4;
+ case GrColorType::kRGBA_8888_SRGB: return 4;
+ case GrColorType::kRGB_888x: return 4;
+ case GrColorType::kRG_88: return 2;
+ case GrColorType::kBGRA_8888: return 4;
+ case GrColorType::kRGBA_1010102: return 4;
+ case GrColorType::kBGRA_1010102: return 4;
+ case GrColorType::kGray_8: return 1;
+ case GrColorType::kGrayAlpha_88: return 2;
+ case GrColorType::kAlpha_F16: return 2;
+ case GrColorType::kRGBA_F16: return 8;
+ case GrColorType::kRGBA_F16_Clamped: return 8;
+ case GrColorType::kRGBA_F32: return 16;
+ case GrColorType::kAlpha_8xxx: return 4;
+ case GrColorType::kAlpha_F32xxx: return 16;
+ case GrColorType::kGray_8xxx: return 4;
+ case GrColorType::kR_8xxx: return 4;
+ case GrColorType::kAlpha_16: return 2;
+ case GrColorType::kRG_1616: return 4;
+ case GrColorType::kRGBA_16161616: return 8;
+ case GrColorType::kRG_F16: return 4;
+ case GrColorType::kRGB_888: return 3;
+ case GrColorType::kR_8: return 1;
+ case GrColorType::kR_16: return 2;
+ case GrColorType::kR_F16: return 2;
+ case GrColorType::kGray_F16: return 2;
+ case GrColorType::kARGB_4444: return 2;
+ case GrColorType::kBGRA_4444: return 2;
+ }
+ SkUNREACHABLE;
+}
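+
+// For example, a tightly packed row of 256 kRGBA_F16 pixels occupies
+// 256 * GrColorTypeBytesPerPixel(GrColorType::kRGBA_F16) == 2048 bytes.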
+
+// In general we try to not mix CompressionType and ColorType, but currently SkImage still requires
+// an SkColorType even for CompressedTypes so we need some conversion.
+static constexpr SkColorType GrCompressionTypeToSkColorType(SkTextureCompressionType compression) {
+ switch (compression) {
+ case SkTextureCompressionType::kNone: return kUnknown_SkColorType;
+ case SkTextureCompressionType::kETC2_RGB8_UNORM: return kRGB_888x_SkColorType;
+ case SkTextureCompressionType::kBC1_RGB8_UNORM: return kRGB_888x_SkColorType;
+ case SkTextureCompressionType::kBC1_RGBA8_UNORM: return kRGBA_8888_SkColorType;
+ }
+
+ SkUNREACHABLE;
+}
+
+enum class GrDstSampleFlags {
+ kNone = 0,
+ kRequiresTextureBarrier = 1 << 0,
+ kAsInputAttachment = 1 << 1,
+};
+GR_MAKE_BITFIELD_CLASS_OPS(GrDstSampleFlags)
+
+using GrVisitProxyFunc = std::function<void(GrSurfaceProxy*, GrMipmapped)>;
+
+#if defined(SK_DEBUG) || GR_TEST_UTILS || defined(SK_ENABLE_DUMP_GPU)
+static constexpr const char* GrBackendApiToStr(GrBackendApi api) {
+ switch (api) {
+ case GrBackendApi::kOpenGL: return "OpenGL";
+ case GrBackendApi::kVulkan: return "Vulkan";
+ case GrBackendApi::kMetal: return "Metal";
+ case GrBackendApi::kDirect3D: return "Direct3D";
+ case GrBackendApi::kDawn: return "Dawn";
+ case GrBackendApi::kMock: return "Mock";
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr const char* GrColorTypeToStr(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return "kUnknown";
+ case GrColorType::kAlpha_8: return "kAlpha_8";
+        case GrColorType::kBGR_565:          return "kBGR_565";
+ case GrColorType::kABGR_4444: return "kABGR_4444";
+ case GrColorType::kRGBA_8888: return "kRGBA_8888";
+ case GrColorType::kRGBA_8888_SRGB: return "kRGBA_8888_SRGB";
+ case GrColorType::kRGB_888x: return "kRGB_888x";
+ case GrColorType::kRG_88: return "kRG_88";
+ case GrColorType::kBGRA_8888: return "kBGRA_8888";
+ case GrColorType::kRGBA_1010102: return "kRGBA_1010102";
+ case GrColorType::kBGRA_1010102: return "kBGRA_1010102";
+ case GrColorType::kGray_8: return "kGray_8";
+ case GrColorType::kGrayAlpha_88: return "kGrayAlpha_88";
+ case GrColorType::kAlpha_F16: return "kAlpha_F16";
+ case GrColorType::kRGBA_F16: return "kRGBA_F16";
+ case GrColorType::kRGBA_F16_Clamped: return "kRGBA_F16_Clamped";
+ case GrColorType::kRGBA_F32: return "kRGBA_F32";
+ case GrColorType::kAlpha_8xxx: return "kAlpha_8xxx";
+ case GrColorType::kAlpha_F32xxx: return "kAlpha_F32xxx";
+ case GrColorType::kGray_8xxx: return "kGray_8xxx";
+ case GrColorType::kR_8xxx: return "kR_8xxx";
+ case GrColorType::kAlpha_16: return "kAlpha_16";
+ case GrColorType::kRG_1616: return "kRG_1616";
+ case GrColorType::kRGBA_16161616: return "kRGBA_16161616";
+ case GrColorType::kRG_F16: return "kRG_F16";
+ case GrColorType::kRGB_888: return "kRGB_888";
+ case GrColorType::kR_8: return "kR_8";
+ case GrColorType::kR_16: return "kR_16";
+ case GrColorType::kR_F16: return "kR_F16";
+ case GrColorType::kGray_F16: return "kGray_F16";
+ case GrColorType::kARGB_4444: return "kARGB_4444";
+ case GrColorType::kBGRA_4444: return "kBGRA_4444";
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr const char* GrCompressionTypeToStr(SkTextureCompressionType compression) {
+ switch (compression) {
+ case SkTextureCompressionType::kNone: return "kNone";
+ case SkTextureCompressionType::kETC2_RGB8_UNORM: return "kETC2_RGB8_UNORM";
+ case SkTextureCompressionType::kBC1_RGB8_UNORM: return "kBC1_RGB8_UNORM";
+ case SkTextureCompressionType::kBC1_RGBA8_UNORM: return "kBC1_RGBA8_UNORM";
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr const char* GrSurfaceOriginToStr(GrSurfaceOrigin origin) {
+ switch (origin) {
+ case kTopLeft_GrSurfaceOrigin: return "kTopLeft";
+ case kBottomLeft_GrSurfaceOrigin: return "kBottomLeft";
+ }
+ SkUNREACHABLE;
+}
+#endif
+
+#endif
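The *ToStr helpers above exist only in debug, test, and GPU-dump builds. A hedged sketch of their intended use with SkDebugf, Skia's debug-print routine; the particular values logged here are arbitrary:

    #if defined(SK_DEBUG)
        SkDebugf("api=%s ct=%s origin=%s\n",
                 GrBackendApiToStr(GrBackendApi::kVulkan),
                 GrColorTypeToStr(GrColorType::kRGBA_8888),
                 GrSurfaceOriginToStr(kTopLeft_GrSurfaceOrigin));
    #endif
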
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrVkTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrVkTypesPriv.h
new file mode 100644
index 0000000000..f300a71396
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrVkTypesPriv.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTypesPriv_DEFINED
+#define GrVkTypesPriv_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/vk/GrVkTypes.h"
+
+namespace skgpu {
+class MutableTextureStateRef;
+}
+
+
+// This struct is used to store the actual information about the Vulkan backend image on the
+// GrBackendTexture and GrBackendRenderTarget. When a client calls getVkImageInfo on a
+// GrBackendTexture/RenderTarget, we use the GrVkBackendSurfaceInfo to create a snapshot
+// GrVkImageInfo object. Internally, this uses a ref-counted GrVkImageLayout object to track the
+// current VkImageLayout, which can be shared with an internal GrVkImage so that layout updates can
+// be seen by all users of the image.
+struct GrVkBackendSurfaceInfo {
+ GrVkBackendSurfaceInfo(GrVkImageInfo info) : fImageInfo(info) {}
+
+ void cleanup();
+
+ GrVkBackendSurfaceInfo& operator=(const GrVkBackendSurfaceInfo&) = delete;
+
+ // Assigns the passed-in GrVkBackendSurfaceInfo to this object. If isValid is true, we will also
+ // attempt to unref the old fLayout on this object.
+ void assign(const GrVkBackendSurfaceInfo&, bool isValid);
+
+ GrVkImageInfo snapImageInfo(const skgpu::MutableTextureStateRef*) const;
+
+ bool isProtected() const { return fImageInfo.fProtected == skgpu::Protected::kYes; }
+#if GR_TEST_UTILS
+ bool operator==(const GrVkBackendSurfaceInfo& that) const;
+#endif
+
+private:
+ GrVkImageInfo fImageInfo;
+};
+
+struct GrVkImageSpec {
+ GrVkImageSpec()
+ : fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+ , fFormat(VK_FORMAT_UNDEFINED)
+ , fImageUsageFlags(0)
+ , fSharingMode(VK_SHARING_MODE_EXCLUSIVE) {}
+
+ GrVkImageSpec(const GrVkSurfaceInfo& info)
+ : fImageTiling(info.fImageTiling)
+ , fFormat(info.fFormat)
+ , fImageUsageFlags(info.fImageUsageFlags)
+ , fYcbcrConversionInfo(info.fYcbcrConversionInfo)
+ , fSharingMode(info.fSharingMode) {}
+
+ VkImageTiling fImageTiling;
+ VkFormat fFormat;
+ VkImageUsageFlags fImageUsageFlags;
+ GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+ VkSharingMode fSharingMode;
+};
+
+GrVkSurfaceInfo GrVkImageSpecToSurfaceInfo(const GrVkImageSpec& vkSpec,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ skgpu::Protected isProtected);
+
+#endif
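A minimal sketch of the GrVkImageSpec round-trip declared above; surfaceInfo is hypothetical and would describe an existing backend texture, and skgpu::Protected is assumed to be reachable via GrVkTypes.h:

    GrVkSurfaceInfo surfaceInfo;        // hypothetical: filled in elsewhere
    GrVkImageSpec spec(surfaceInfo);    // captures only creation-time state
    GrVkSurfaceInfo rebuilt = GrVkImageSpecToSurfaceInfo(
            spec, /*sampleCount=*/1, /*levelCount=*/1, skgpu::Protected::kNo);
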
diff --git a/gfx/skia/skia/include/private/gpu/graphite/DawnTypesPriv.h b/gfx/skia/skia/include/private/gpu/graphite/DawnTypesPriv.h
new file mode 100644
index 0000000000..bbf401c95e
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/graphite/DawnTypesPriv.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_DawnTypesPriv_DEFINED
+#define skgpu_graphite_DawnTypesPriv_DEFINED
+
+#include "include/gpu/graphite/dawn/DawnTypes.h"
+
+namespace skgpu::graphite {
+
+struct DawnTextureSpec {
+ DawnTextureSpec()
+ : fFormat(wgpu::TextureFormat::Undefined)
+ , fUsage(wgpu::TextureUsage::None) {}
+ DawnTextureSpec(const DawnTextureInfo& info)
+ : fFormat(info.fFormat)
+ , fUsage(info.fUsage) {}
+
+ bool operator==(const DawnTextureSpec& that) const {
+ return fUsage == that.fUsage &&
+ fFormat == that.fFormat;
+ }
+
+ wgpu::TextureFormat fFormat;
+ wgpu::TextureUsage fUsage;
+};
+
+DawnTextureInfo DawnTextureSpecToTextureInfo(const DawnTextureSpec& dawnSpec,
+ uint32_t sampleCount,
+ Mipmapped mipmapped);
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_DawnTypesPriv_DEFINED
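A sketch of the spec/info split in use; textureInfo is hypothetical, and skgpu::Mipmapped is assumed to be the mip-status enum pulled in through DawnTypes.h:

    skgpu::graphite::DawnTextureInfo textureInfo;        // hypothetical
    skgpu::graphite::DawnTextureSpec spec(textureInfo);  // keeps format + usage
    skgpu::graphite::DawnTextureInfo rebuilt =
            skgpu::graphite::DawnTextureSpecToTextureInfo(
                    spec, /*sampleCount=*/1, skgpu::Mipmapped::kNo);
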
diff --git a/gfx/skia/skia/include/private/gpu/graphite/MtlGraphiteTypesPriv.h b/gfx/skia/skia/include/private/gpu/graphite/MtlGraphiteTypesPriv.h
new file mode 100644
index 0000000000..bf26aa2a78
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/graphite/MtlGraphiteTypesPriv.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_MtlGraphiteTypesPriv_DEFINED
+#define skgpu_graphite_MtlGraphiteTypesPriv_DEFINED
+
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/graphite/mtl/MtlGraphiteTypes.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef __APPLE__
+
+#include <TargetConditionals.h>
+
+// We're using the MSL version as shorthand for the Metal SDK version here
+#if defined(SK_BUILD_FOR_MAC)
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 120000
+#define GR_METAL_SDK_VERSION 240
+#elif __MAC_OS_X_VERSION_MAX_ALLOWED >= 110000
+#define GR_METAL_SDK_VERSION 230
+#else
+#error Must use at least 11.00 SDK to build Metal backend for macOS
+#endif
+#else
+#if __IPHONE_OS_VERSION_MAX_ALLOWED >= 150000 || __TV_OS_VERSION_MAX_ALLOWED >= 150000
+#define GR_METAL_SDK_VERSION 240
+#elif __IPHONE_OS_VERSION_MAX_ALLOWED >= 140000 || __TV_OS_VERSION_MAX_ALLOWED >= 140000
+#define GR_METAL_SDK_VERSION 230
+#else
+#error Must use at least 14.00 SDK to build Metal backend for iOS
+#endif
+#endif
+
+#endif // __APPLE__
+
+namespace skgpu::graphite {
+
+struct MtlTextureSpec {
+ MtlTextureSpec()
+ : fFormat(0)
+ , fUsage(0)
+ , fStorageMode(0)
+ , fFramebufferOnly(false) {}
+ MtlTextureSpec(const MtlTextureInfo& info)
+ : fFormat(info.fFormat)
+ , fUsage(info.fUsage)
+ , fStorageMode(info.fStorageMode)
+ , fFramebufferOnly(info.fFramebufferOnly) {}
+
+ bool operator==(const MtlTextureSpec& that) const {
+ return fFormat == that.fFormat &&
+ fUsage == that.fUsage &&
+ fStorageMode == that.fStorageMode &&
+ fFramebufferOnly == that.fFramebufferOnly;
+ }
+
+ MtlPixelFormat fFormat;
+ MtlTextureUsage fUsage;
+ MtlStorageMode fStorageMode;
+ bool fFramebufferOnly;
+};
+
+MtlTextureInfo MtlTextureSpecToTextureInfo(const MtlTextureSpec& mtlSpec,
+ uint32_t sampleCount,
+ Mipmapped mipmapped);
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_MtlGraphiteTypesPriv_DEFINED
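Because operator== compares every stored field, MtlTextureSpec can stand in for a "same texture configuration" check. A hedged sketch; both infos are hypothetical:

    skgpu::graphite::MtlTextureInfo infoA, infoB;   // hypothetical descriptions
    bool sameSpec = skgpu::graphite::MtlTextureSpec(infoA) ==
                    skgpu::graphite::MtlTextureSpec(infoB);
    // sameSpec considers format, usage, storage mode, and the framebuffer-only
    // bit; sample count and mip state are not part of the spec.
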
diff --git a/gfx/skia/skia/include/private/gpu/graphite/VulkanGraphiteTypesPriv.h b/gfx/skia/skia/include/private/gpu/graphite/VulkanGraphiteTypesPriv.h
new file mode 100644
index 0000000000..b4304e3ae8
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/graphite/VulkanGraphiteTypesPriv.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_VulkanGraphiteTypesPriv_DEFINED
+#define skgpu_graphite_VulkanGraphiteTypesPriv_DEFINED
+
+#include "include/gpu/graphite/vk/VulkanGraphiteTypes.h"
+
+namespace skgpu::graphite {
+
+struct VulkanTextureSpec {
+ VulkanTextureSpec()
+ : fFlags(0)
+ , fFormat(VK_FORMAT_UNDEFINED)
+ , fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+ , fImageUsageFlags(0)
+ , fSharingMode(VK_SHARING_MODE_EXCLUSIVE)
+ , fAspectMask(VK_IMAGE_ASPECT_COLOR_BIT) {}
+ VulkanTextureSpec(const VulkanTextureInfo& info)
+ : fFlags(info.fFlags)
+ , fFormat(info.fFormat)
+ , fImageTiling(info.fImageTiling)
+ , fImageUsageFlags(info.fImageUsageFlags)
+ , fSharingMode(info.fSharingMode)
+ , fAspectMask(info.fAspectMask) {}
+
+ bool operator==(const VulkanTextureSpec& that) const {
+ return fFlags == that.fFlags &&
+ fFormat == that.fFormat &&
+ fImageTiling == that.fImageTiling &&
+ fImageUsageFlags == that.fImageUsageFlags &&
+ fSharingMode == that.fSharingMode &&
+ fAspectMask == that.fAspectMask;
+ }
+
+ VkImageCreateFlags fFlags;
+ VkFormat fFormat;
+ VkImageTiling fImageTiling;
+ VkImageUsageFlags fImageUsageFlags;
+ VkSharingMode fSharingMode;
+ VkImageAspectFlags fAspectMask;
+ // GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+};
+
+VulkanTextureInfo VulkanTextureSpecToTextureInfo(const VulkanTextureSpec& vkSpec,
+ uint32_t sampleCount,
+ Mipmapped mipmapped);
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_VulkanGraphiteTypesPriv_DEFINED
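A sketch showing that sample count and mip state are supplied when a spec is expanded back into a full description rather than stored in it; msaaInfo is hypothetical, and skgpu::Mipmapped is assumed to be reachable via VulkanGraphiteTypes.h:

    skgpu::graphite::VulkanTextureInfo msaaInfo;        // hypothetical: 4x MSAA
    skgpu::graphite::VulkanTextureSpec spec(msaaInfo);  // sample count dropped
    skgpu::graphite::VulkanTextureInfo single =
            skgpu::graphite::VulkanTextureSpecToTextureInfo(
                    spec, /*sampleCount=*/1, skgpu::Mipmapped::kNo);
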
diff --git a/gfx/skia/skia/include/private/gpu/vk/SkiaVulkan.h b/gfx/skia/skia/include/private/gpu/vk/SkiaVulkan.h
new file mode 100644
index 0000000000..ca4bcf108b
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/vk/SkiaVulkan.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkiaVulkan_DEFINED
+#define SkiaVulkan_DEFINED
+
+#include "include/core/SkTypes.h"
+
+// IWYU pragma: begin_exports
+
+#if SKIA_IMPLEMENTATION || !defined(SK_VULKAN)
+#include "include/third_party/vulkan/vulkan/vulkan_core.h"
+#else
+// For google3 builds we don't set SKIA_IMPLEMENTATION, so we need to make sure that the Vulkan
+// headers stay up to date for our needs.
+#include <vulkan/vulkan_core.h>
+#endif
+
+#ifdef SK_BUILD_FOR_ANDROID
+// This is needed to get the Android extensions for external memory.
+#if SKIA_IMPLEMENTATION || !defined(SK_VULKAN)
+#include "include/third_party/vulkan/vulkan/vulkan_android.h"
+#else
+// For google3 builds we don't set SKIA_IMPLEMENTATION, so we need to make sure that the Vulkan
+// headers stay up to date for our needs.
+#include <vulkan/vulkan_android.h>
+#endif
+#endif
+
+// IWYU pragma: end_exports
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/vk/VulkanTypesPriv.h b/gfx/skia/skia/include/private/gpu/vk/VulkanTypesPriv.h
new file mode 100644
index 0000000000..e99869ca1a
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/vk/VulkanTypesPriv.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_VulkanTypesPriv_DEFINED
+#define skgpu_VulkanTypesPriv_DEFINED
+
+#include "include/gpu/vk/VulkanTypes.h"
+
+#include <atomic>
+
+namespace skgpu {
+
+class VulkanMutableTextureState {
+public:
+ VulkanMutableTextureState(VkImageLayout layout, uint32_t queueFamilyIndex)
+ : fLayout(layout)
+ , fQueueFamilyIndex(queueFamilyIndex) {}
+
+ VulkanMutableTextureState& operator=(const VulkanMutableTextureState& that) {
+ fLayout = that.getImageLayout();
+ fQueueFamilyIndex = that.getQueueFamilyIndex();
+ return *this;
+ }
+
+ void setImageLayout(VkImageLayout layout) {
+ // Defaulting to use std::memory_order_seq_cst
+ fLayout.store(layout);
+ }
+
+ VkImageLayout getImageLayout() const {
+ // Defaulting to use std::memory_order_seq_cst
+ return fLayout.load();
+ }
+
+ void setQueueFamilyIndex(uint32_t queueFamilyIndex) {
+ // Defaulting to use std::memory_order_seq_cst
+ fQueueFamilyIndex.store(queueFamilyIndex);
+ }
+
+ uint32_t getQueueFamilyIndex() const {
+ // Defaulting to use std::memory_order_seq_cst
+ return fQueueFamilyIndex.load();
+ }
+
+private:
+ std::atomic<VkImageLayout> fLayout;
+ std::atomic<uint32_t> fQueueFamilyIndex;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_VulkanTypesPriv_DEFINED
+
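Both members are std::atomic with seq_cst defaults, so one thread can publish a layout transition while another polls it without extra locking. A minimal sketch using standard vulkan_core.h constants:

    skgpu::VulkanMutableTextureState state(VK_IMAGE_LAYOUT_UNDEFINED,
                                           VK_QUEUE_FAMILY_IGNORED);

    // Producer thread, after recording a layout transition:
    state.setImageLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

    // Consumer thread: the seq_cst load observes the published layout.
    if (state.getImageLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
        // ... safe to sample the image ...
    }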