Diffstat (limited to 'gfx/skia/skia/include/gpu')
-rw-r--r--  gfx/skia/skia/include/gpu/GpuTypes.h  72
-rw-r--r--  gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h  44
-rw-r--r--  gfx/skia/skia/include/gpu/GrBackendSemaphore.h  140
-rw-r--r--  gfx/skia/skia/include/gpu/GrBackendSurface.h  666
-rw-r--r--  gfx/skia/skia/include/gpu/GrBackendSurfaceMutableState.h  26
-rw-r--r--  gfx/skia/skia/include/gpu/GrContextOptions.h  374
-rw-r--r--  gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h  169
-rw-r--r--  gfx/skia/skia/include/gpu/GrDirectContext.h  908
-rw-r--r--  gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h  53
-rw-r--r--  gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h  43
-rw-r--r--  gfx/skia/skia/include/gpu/GrRecordingContext.h  286
-rw-r--r--  gfx/skia/skia/include/gpu/GrSurfaceInfo.h  166
-rw-r--r--  gfx/skia/skia/include/gpu/GrTypes.h  244
-rw-r--r--  gfx/skia/skia/include/gpu/GrYUVABackendTextures.h  124
-rw-r--r--  gfx/skia/skia/include/gpu/MutableTextureState.h  122
-rw-r--r--  gfx/skia/skia/include/gpu/ShaderErrorHandler.h  36
-rw-r--r--  gfx/skia/skia/include/gpu/d3d/GrD3DBackendContext.h  35
-rw-r--r--  gfx/skia/skia/include/gpu/d3d/GrD3DTypes.h  248
-rw-r--r--  gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h  95
-rw-r--r--  gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h  11
-rw-r--r--  gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h  39
-rw-r--r--  gfx/skia/skia/include/gpu/gl/GrGLConfig.h  79
-rw-r--r--  gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h  14
-rw-r--r--  gfx/skia/skia/include/gpu/gl/GrGLExtensions.h  78
-rw-r--r--  gfx/skia/skia/include/gpu/gl/GrGLFunctions.h  307
-rw-r--r--  gfx/skia/skia/include/gpu/gl/GrGLInterface.h  340
-rw-r--r--  gfx/skia/skia/include/gpu/gl/GrGLTypes.h  208
-rw-r--r--  gfx/skia/skia/include/gpu/gl/egl/GrGLMakeEGLInterface.h  14
-rw-r--r--  gfx/skia/skia/include/gpu/gl/glx/GrGLMakeGLXInterface.h  14
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/BackendTexture.h  153
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/Context.h  166
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/ContextOptions.h  87
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/GraphiteTypes.h  105
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/ImageProvider.h  60
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/Recorder.h  212
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/Recording.h  96
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/TextureInfo.h  162
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/YUVABackendTextures.h  139
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/dawn/DawnBackendContext.h  25
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/dawn/DawnTypes.h  40
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/dawn/DawnUtils.h  26
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/mtl/MtlBackendContext.h  25
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteTypes.h  69
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteUtils.h  25
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteTypes.h  64
-rw-r--r--  gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteUtils.h  26
-rw-r--r--  gfx/skia/skia/include/gpu/mock/GrMockTypes.h  146
-rw-r--r--  gfx/skia/skia/include/gpu/mtl/GrMtlBackendContext.h  21
-rw-r--r--  gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h  63
-rw-r--r--  gfx/skia/skia/include/gpu/mtl/MtlMemoryAllocator.h  39
-rw-r--r--  gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h  78
-rw-r--r--  gfx/skia/skia/include/gpu/vk/GrVkExtensions.h  15
-rw-r--r--  gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h  15
-rw-r--r--  gfx/skia/skia/include/gpu/vk/GrVkTypes.h  149
-rw-r--r--  gfx/skia/skia/include/gpu/vk/VulkanBackendContext.h  46
-rw-r--r--  gfx/skia/skia/include/gpu/vk/VulkanExtensions.h  67
-rw-r--r--  gfx/skia/skia/include/gpu/vk/VulkanMemoryAllocator.h  114
-rw-r--r--  gfx/skia/skia/include/gpu/vk/VulkanTypes.h  59
58 files changed, 7247 insertions, 0 deletions
diff --git a/gfx/skia/skia/include/gpu/GpuTypes.h b/gfx/skia/skia/include/gpu/GpuTypes.h
new file mode 100644
index 0000000000..e2e3961f8b
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GpuTypes.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_GpuTypes_DEFINED
+#define skgpu_GpuTypes_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ * This file includes numerous public types that are used by all of our gpu backends.
+ */
+
+namespace skgpu {
+
+/**
+ * Possible 3D APIs that may be used by Graphite.
+ */
+enum class BackendApi : unsigned {
+ kDawn,
+ kMetal,
+ kVulkan,
+ kMock,
+};
+
+/** Indicates whether an allocation should count against a cache budget. */
+enum class Budgeted : bool {
+ kNo = false,
+ kYes = true,
+};
+
+/**
+ * Value passed into various callbacks to tell the client the result of operations connected to a
+ * specific callback. The actual interpretation of kFailed and kSuccess is dependent on the
+ * specific callback and is documented with the callback itself.
+ */
+enum class CallbackResult : bool {
+ kFailed = false,
+ kSuccess = true,
+};
+
+/**
+ * Is the texture mipmapped or not.
+ */
+enum class Mipmapped : bool {
+ kNo = false,
+ kYes = true,
+};
+
+/**
+ * Is the data protected on the GPU or not.
+ */
+enum class Protected : bool {
+ kNo = false,
+ kYes = true,
+};
+
+/**
+ * Is a texture renderable or not.
+ */
+enum class Renderable : bool {
+ kNo = false,
+ kYes = true,
+};
+
+} // namespace skgpu
+
+
+#endif // skgpu_GpuTypes_DEFINED
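
Usage sketch (hypothetical helper, not part of this patch): the bool-backed enums above are meant to replace bare bool arguments at call sites, e.g.:

    #include "include/gpu/GpuTypes.h"

    // Explicit enum values document intent better than raw true/false arguments.
    bool canRenderMipmapped(skgpu::Renderable renderable, skgpu::Mipmapped mipmapped) {
        return renderable == skgpu::Renderable::kYes &&
               mipmapped  == skgpu::Mipmapped::kYes;
    }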
diff --git a/gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h b/gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h
new file mode 100644
index 0000000000..bda1e769fd
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBackendDrawableInfo_DEFINED
+#define GrBackendDrawableInfo_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/gpu/vk/GrVkTypes.h"
+
+class SK_API GrBackendDrawableInfo {
+public:
+ // Creates an invalid backend drawable info.
+ GrBackendDrawableInfo() : fIsValid(false) {}
+
+ GrBackendDrawableInfo(const GrVkDrawableInfo& info)
+ : fIsValid(true)
+ , fBackend(GrBackendApi::kVulkan)
+ , fVkInfo(info) {}
+
+ // Returns true if the backend texture has been initialized.
+ bool isValid() const { return fIsValid; }
+
+ GrBackendApi backend() const { return fBackend; }
+
+ bool getVkDrawableInfo(GrVkDrawableInfo* outInfo) const {
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ *outInfo = fVkInfo;
+ return true;
+ }
+ return false;
+ }
+
+private:
+ bool fIsValid;
+ GrBackendApi fBackend;
+ GrVkDrawableInfo fVkInfo;
+};
+
+#endif
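
Usage sketch (hypothetical caller, not part of this patch): the wrapper is queried by checking validity and then unwrapping the Vulkan payload:

    #include "include/gpu/GrBackendDrawableInfo.h"

    void handleDrawable(const GrBackendDrawableInfo& info) {
        GrVkDrawableInfo vkInfo;
        // getVkDrawableInfo() only succeeds for an initialized, Vulkan-backed wrapper.
        if (info.isValid() && info.getVkDrawableInfo(&vkInfo)) {
            // ... record commands using vkInfo ...
        }
    }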
diff --git a/gfx/skia/skia/include/gpu/GrBackendSemaphore.h b/gfx/skia/skia/include/gpu/GrBackendSemaphore.h
new file mode 100644
index 0000000000..13d07928e7
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBackendSemaphore.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBackendSemaphore_DEFINED
+#define GrBackendSemaphore_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/gpu/gl/GrGLTypes.h"
+
+#ifdef SK_METAL
+#include "include/gpu/mtl/GrMtlTypes.h"
+#endif
+
+#ifdef SK_VULKAN
+#include "include/gpu/vk/GrVkTypes.h"
+#endif
+
+#ifdef SK_DIRECT3D
+#include "include/private/gpu/ganesh/GrD3DTypesMinimal.h"
+#endif
+
+/**
+ * Wrapper class for passing into and receiving data from Ganesh about a backend semaphore object.
+ */
+class GrBackendSemaphore {
+public:
+ // For convenience we just set the backend here to OpenGL. The GrBackendSemaphore cannot be
+ // used until one of the init* methods is called, which will set the appropriate GrBackendApi.
+ GrBackendSemaphore()
+ : fBackend(GrBackendApi::kOpenGL), fGLSync(nullptr), fIsInitialized(false) {}
+
+#ifdef SK_DIRECT3D
+ // We only need to specify these if Direct3D is enabled, because it requires special copy
+ // characteristics.
+ ~GrBackendSemaphore();
+ GrBackendSemaphore(const GrBackendSemaphore&);
+ GrBackendSemaphore& operator=(const GrBackendSemaphore&);
+#endif
+
+ void initGL(GrGLsync sync) {
+ fBackend = GrBackendApi::kOpenGL;
+ fGLSync = sync;
+ fIsInitialized = true;
+ }
+
+#ifdef SK_VULKAN
+ void initVulkan(VkSemaphore semaphore) {
+ fBackend = GrBackendApi::kVulkan;
+ fVkSemaphore = semaphore;
+
+ fIsInitialized = true;
+ }
+
+ VkSemaphore vkSemaphore() const {
+ if (!fIsInitialized || GrBackendApi::kVulkan != fBackend) {
+ return VK_NULL_HANDLE;
+ }
+ return fVkSemaphore;
+ }
+#endif
+
+#ifdef SK_METAL
+ // It is the creator's responsibility to ref the MTLEvent passed in here, via __bridge_retained.
+ // The other end will wrap this BackendSemaphore and take the ref, via __bridge_transfer.
+ void initMetal(GrMTLHandle event, uint64_t value) {
+ fBackend = GrBackendApi::kMetal;
+ fMtlEvent = event;
+ fMtlValue = value;
+
+ fIsInitialized = true;
+ }
+
+ GrMTLHandle mtlSemaphore() const {
+ if (!fIsInitialized || GrBackendApi::kMetal != fBackend) {
+ return nullptr;
+ }
+ return fMtlEvent;
+ }
+
+ uint64_t mtlValue() const {
+ if (!fIsInitialized || GrBackendApi::kMetal != fBackend) {
+ return 0;
+ }
+ return fMtlValue;
+ }
+
+#endif
+
+#ifdef SK_DIRECT3D
+ void initDirect3D(const GrD3DFenceInfo& info) {
+ fBackend = GrBackendApi::kDirect3D;
+ this->assignD3DFenceInfo(info);
+ fIsInitialized = true;
+ }
+#endif
+
+ bool isInitialized() const { return fIsInitialized; }
+
+ GrGLsync glSync() const {
+ if (!fIsInitialized || GrBackendApi::kOpenGL != fBackend) {
+ return nullptr;
+ }
+ return fGLSync;
+ }
+
+
+#ifdef SK_DIRECT3D
+ bool getD3DFenceInfo(GrD3DFenceInfo* outInfo) const;
+#endif
+
+private:
+#ifdef SK_DIRECT3D
+ void assignD3DFenceInfo(const GrD3DFenceInfo& info);
+#endif
+
+ GrBackendApi fBackend;
+ union {
+ GrGLsync fGLSync;
+#ifdef SK_VULKAN
+ VkSemaphore fVkSemaphore;
+#endif
+#ifdef SK_METAL
+ GrMTLHandle fMtlEvent; // Expected to be an id<MTLEvent>
+#endif
+#ifdef SK_DIRECT3D
+ GrD3DFenceInfo* fD3DFenceInfo;
+#endif
+ };
+#ifdef SK_METAL
+ uint64_t fMtlValue;
+#endif
+ bool fIsInitialized;
+};
+
+#endif
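
Usage sketch (hypothetical, Vulkan-only, not part of this patch): a semaphore created by the client is handed to Skia by wrapping it:

    #ifdef SK_VULKAN
    #include "include/gpu/GrBackendSemaphore.h"

    // Wrap an externally created VkSemaphore so Skia can wait on or signal it.
    GrBackendSemaphore wrapSemaphore(VkSemaphore sem) {
        GrBackendSemaphore backendSem;
        backendSem.initVulkan(sem);  // switches fBackend to kVulkan and marks it initialized
        return backendSem;
    }
    #endif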
diff --git a/gfx/skia/skia/include/gpu/GrBackendSurface.h b/gfx/skia/skia/include/gpu/GrBackendSurface.h
new file mode 100644
index 0000000000..e196cb9272
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBackendSurface.h
@@ -0,0 +1,666 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBackendSurface_DEFINED
+#define GrBackendSurface_DEFINED
+
+// This include of GrBackendSurfaceMutableState is not needed here, but some clients were depending
+// on the include here instead of including it themselves. Adding this back here until we can fix
+// up clients so it can be removed.
+#include "include/gpu/GrBackendSurfaceMutableState.h"
+
+#include "include/gpu/GrSurfaceInfo.h"
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/MutableTextureState.h"
+#ifdef SK_GL
+#include "include/gpu/gl/GrGLTypes.h"
+#include "include/private/gpu/ganesh/GrGLTypesPriv.h"
+#endif
+#include "include/gpu/mock/GrMockTypes.h"
+#ifdef SK_VULKAN
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/private/gpu/ganesh/GrVkTypesPriv.h"
+#endif
+
+#ifdef SK_DAWN
+#include "include/gpu/dawn/GrDawnTypes.h"
+#endif
+
+#include <string>
+
+class GrVkImageLayout;
+class GrGLTextureParameters;
+class GrColorFormatDesc;
+enum class SkTextureCompressionType;
+
+namespace skgpu {
+class MutableTextureStateRef;
+}
+
+#ifdef SK_DAWN
+#include "webgpu/webgpu_cpp.h"
+#endif
+
+#ifdef SK_METAL
+#include "include/gpu/mtl/GrMtlTypes.h"
+#endif
+
+#ifdef SK_DIRECT3D
+#include "include/private/gpu/ganesh/GrD3DTypesMinimal.h"
+class GrD3DResourceState;
+#endif
+
+#if defined(SK_DEBUG) || GR_TEST_UTILS
+class SkString;
+#endif
+
+#if !defined(SK_GANESH)
+
+// SkSurfaceCharacterization always needs a minimal version of this
+class SK_API GrBackendFormat {
+public:
+ bool isValid() const { return false; }
+};
+
+// SkSurface and SkImage rely on a minimal version of these always being available
+class SK_API GrBackendTexture {
+public:
+ GrBackendTexture() {}
+
+ bool isValid() const { return false; }
+};
+
+class SK_API GrBackendRenderTarget {
+public:
+ GrBackendRenderTarget() {}
+
+ bool isValid() const { return false; }
+ bool isFramebufferOnly() const { return false; }
+};
+#else
+
+enum class GrGLFormat;
+
+class SK_API GrBackendFormat {
+public:
+ // Creates an invalid backend format.
+ GrBackendFormat() {}
+ GrBackendFormat(const GrBackendFormat&);
+ GrBackendFormat& operator=(const GrBackendFormat&);
+
+#ifdef SK_GL
+ static GrBackendFormat MakeGL(GrGLenum format, GrGLenum target) {
+ return GrBackendFormat(format, target);
+ }
+#endif
+
+#ifdef SK_VULKAN
+ static GrBackendFormat MakeVk(VkFormat format, bool willUseDRMFormatModifiers = false) {
+ return GrBackendFormat(format, GrVkYcbcrConversionInfo(), willUseDRMFormatModifiers);
+ }
+
+ static GrBackendFormat MakeVk(const GrVkYcbcrConversionInfo& ycbcrInfo,
+ bool willUseDRMFormatModifiers = false);
+#endif
+
+#ifdef SK_DAWN
+ static GrBackendFormat MakeDawn(wgpu::TextureFormat format) {
+ return GrBackendFormat(format);
+ }
+#endif
+
+#ifdef SK_METAL
+ static GrBackendFormat MakeMtl(GrMTLPixelFormat format) {
+ return GrBackendFormat(format);
+ }
+#endif
+
+#ifdef SK_DIRECT3D
+ static GrBackendFormat MakeDxgi(DXGI_FORMAT format) {
+ return GrBackendFormat(format);
+ }
+#endif
+
+ static GrBackendFormat MakeMock(GrColorType colorType, SkTextureCompressionType compression,
+ bool isStencilFormat = false);
+
+ bool operator==(const GrBackendFormat& that) const;
+ bool operator!=(const GrBackendFormat& that) const { return !(*this == that); }
+
+ GrBackendApi backend() const { return fBackend; }
+ GrTextureType textureType() const { return fTextureType; }
+
+ /**
+ * Gets the channels present in the format as a bitfield of SkColorChannelFlag values.
+ * Luminance channels are reported as kGray_SkColorChannelFlag.
+ */
+ uint32_t channelMask() const;
+
+ GrColorFormatDesc desc() const;
+
+#ifdef SK_GL
+ /**
+ * If the backend API is GL this gets the format as a GrGLFormat. Otherwise, returns
+ * GrGLFormat::kUnknown.
+ */
+ GrGLFormat asGLFormat() const;
+
+ GrGLenum asGLFormatEnum() const;
+#endif
+
+#ifdef SK_VULKAN
+ /**
+ * If the backend API is Vulkan this gets the format as a VkFormat and returns true. Otherwise,
+ * returns false.
+ */
+ bool asVkFormat(VkFormat*) const;
+
+ const GrVkYcbcrConversionInfo* getVkYcbcrConversionInfo() const;
+#endif
+
+#ifdef SK_DAWN
+ /**
+ * If the backend API is Dawn this gets the format as a wgpu::TextureFormat and returns true.
+ * Otherwise, returns false.
+ */
+ bool asDawnFormat(wgpu::TextureFormat*) const;
+#endif
+
+#ifdef SK_METAL
+ /**
+ * If the backend API is Metal this gets the format as a GrMTLPixelFormat. Otherwise,
+ * returns MTLPixelFormatInvalid.
+ */
+ GrMTLPixelFormat asMtlFormat() const;
+#endif
+
+#ifdef SK_DIRECT3D
+ /**
+ * If the backend API is Direct3D this gets the format as a DXGI_FORMAT and returns true.
+ * Otherwise, returns false.
+ */
+ bool asDxgiFormat(DXGI_FORMAT*) const;
+#endif
+
+ /**
+ * If the backend API is not Mock these three calls will return kUnknown, kNone or false,
+ * respectively. Otherwise, exactly one of the following will be true: the GrColorType is not
+ * kUnknown, the compression type is not kNone, or this is a mock stencil format.
+ */
+ GrColorType asMockColorType() const;
+ SkTextureCompressionType asMockCompressionType() const;
+ bool isMockStencilFormat() const;
+
+ // If possible, copies the GrBackendFormat and forces the texture type to be Texture2D. If the
+ // GrBackendFormat was for Vulkan and it originally had a GrVkYcbcrConversionInfo, we will
+ // remove the conversion and set the format to be VK_FORMAT_R8G8B8A8_UNORM.
+ GrBackendFormat makeTexture2D() const;
+
+ // Returns true if the backend format has been initialized.
+ bool isValid() const { return fValid; }
+
+#if defined(SK_DEBUG) || GR_TEST_UTILS
+ SkString toStr() const;
+#endif
+
+private:
+#ifdef SK_GL
+ GrBackendFormat(GrGLenum format, GrGLenum target);
+#endif
+
+#ifdef SK_VULKAN
+ GrBackendFormat(const VkFormat vkFormat, const GrVkYcbcrConversionInfo&,
+ bool willUseDRMFormatModifiers);
+#endif
+
+#ifdef SK_DAWN
+ GrBackendFormat(wgpu::TextureFormat format);
+#endif
+
+#ifdef SK_METAL
+ GrBackendFormat(const GrMTLPixelFormat mtlFormat);
+#endif
+
+#ifdef SK_DIRECT3D
+ GrBackendFormat(DXGI_FORMAT dxgiFormat);
+#endif
+
+ GrBackendFormat(GrColorType, SkTextureCompressionType, bool isStencilFormat);
+
+#ifdef SK_DEBUG
+ bool validateMock() const;
+#endif
+
+ GrBackendApi fBackend = GrBackendApi::kMock;
+ bool fValid = false;
+
+ union {
+#ifdef SK_GL
+ GrGLenum fGLFormat; // the sized, internal format of the GL resource
+#endif
+#ifdef SK_VULKAN
+ struct {
+ VkFormat fFormat;
+ GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+ } fVk;
+#endif
+#ifdef SK_DAWN
+ wgpu::TextureFormat fDawnFormat;
+#endif
+
+#ifdef SK_METAL
+ GrMTLPixelFormat fMtlFormat;
+#endif
+
+#ifdef SK_DIRECT3D
+ DXGI_FORMAT fDxgiFormat;
+#endif
+ struct {
+ GrColorType fColorType;
+ SkTextureCompressionType fCompressionType;
+ bool fIsStencilFormat;
+ } fMock;
+ };
+ GrTextureType fTextureType = GrTextureType::kNone;
+};
+
+class SK_API GrBackendTexture {
+public:
+ // Creates an invalid backend texture.
+ GrBackendTexture();
+
+#ifdef SK_GL
+ // The GrGLTextureInfo must have a valid fFormat.
+ GrBackendTexture(int width,
+ int height,
+ GrMipmapped,
+ const GrGLTextureInfo& glInfo,
+ std::string_view label = {});
+#endif
+
+#ifdef SK_VULKAN
+ GrBackendTexture(int width,
+ int height,
+ const GrVkImageInfo& vkInfo,
+ std::string_view label = {});
+#endif
+
+#ifdef SK_METAL
+ GrBackendTexture(int width,
+ int height,
+ GrMipmapped,
+ const GrMtlTextureInfo& mtlInfo,
+ std::string_view label = {});
+#endif
+
+#ifdef SK_DIRECT3D
+ GrBackendTexture(int width,
+ int height,
+ const GrD3DTextureResourceInfo& d3dInfo,
+ std::string_view label = {});
+#endif
+
+#ifdef SK_DAWN
+ GrBackendTexture(int width,
+ int height,
+ const GrDawnTextureInfo& dawnInfo,
+ std::string_view label = {});
+#endif
+
+ GrBackendTexture(int width,
+ int height,
+ GrMipmapped,
+ const GrMockTextureInfo& mockInfo,
+ std::string_view label = {});
+
+ GrBackendTexture(const GrBackendTexture& that);
+
+ ~GrBackendTexture();
+
+ GrBackendTexture& operator=(const GrBackendTexture& that);
+
+ SkISize dimensions() const { return {fWidth, fHeight}; }
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ std::string_view getLabel() const { return fLabel; }
+ GrMipmapped mipmapped() const { return fMipmapped; }
+ bool hasMipmaps() const { return fMipmapped == GrMipmapped::kYes; }
+ /** deprecated alias of hasMipmaps(). */
+ bool hasMipMaps() const { return this->hasMipmaps(); }
+ GrBackendApi backend() const { return fBackend; }
+ GrTextureType textureType() const { return fTextureType; }
+
+#ifdef SK_GL
+ // If the backend API is GL, copies a snapshot of the GrGLTextureInfo struct into the passed in
+ // pointer and returns true. Otherwise returns false if the backend API is not GL.
+ bool getGLTextureInfo(GrGLTextureInfo*) const;
+
+ // Call this to indicate that the texture parameters have been modified in the GL context
+ // externally to GrContext.
+ void glTextureParametersModified();
+#endif
+
+#ifdef SK_DAWN
+ // If the backend API is Dawn, copies a snapshot of the GrDawnTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Dawn.
+ bool getDawnTextureInfo(GrDawnTextureInfo*) const;
+#endif
+
+#ifdef SK_VULKAN
+ // If the backend API is Vulkan, copies a snapshot of the GrVkImageInfo struct into the passed
+ // in pointer and returns true. This snapshot will set the fImageLayout to the current layout
+ // state. Otherwise returns false if the backend API is not Vulkan.
+ bool getVkImageInfo(GrVkImageInfo*) const;
+
+ // Anytime the client changes the VkImageLayout of the VkImage captured by this
+ // GrBackendTexture, they must call this function to notify Skia of the changed layout.
+ void setVkImageLayout(VkImageLayout);
+#endif
+
+#ifdef SK_METAL
+ // If the backend API is Metal, copies a snapshot of the GrMtlTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Metal.
+ bool getMtlTextureInfo(GrMtlTextureInfo*) const;
+#endif
+
+#ifdef SK_DIRECT3D
+ // If the backend API is Direct3D, copies a snapshot of the GrD3DTextureResourceInfo struct into
+ // the passed in pointer and returns true. This snapshot will set the fResourceState to the
+ // current resource state. Otherwise returns false if the backend API is not D3D.
+ bool getD3DTextureResourceInfo(GrD3DTextureResourceInfo*) const;
+
+ // Anytime the client changes the D3D12_RESOURCE_STATES of the D3D12_RESOURCE captured by this
+ // GrBackendTexture, they must call this function to notify Skia of the changed state.
+ void setD3DResourceState(GrD3DResourceStateEnum);
+#endif
+
+ // Get the GrBackendFormat for this texture (or an invalid format if this is not valid).
+ GrBackendFormat getBackendFormat() const;
+
+ // If the backend API is Mock, copies a snapshot of the GrMockTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Mock.
+ bool getMockTextureInfo(GrMockTextureInfo*) const;
+
+ // If the client changes any of the mutable backend state of the GrBackendTexture they should call
+ // this function to inform Skia that those values have changed. The backend API specific state
+ // that can be set from this function are:
+ //
+ // Vulkan: VkImageLayout and QueueFamilyIndex
+ void setMutableState(const skgpu::MutableTextureState&);
+
+ // Returns true if we are working with protected content.
+ bool isProtected() const;
+
+ // Returns true if the backend texture has been initialized.
+ bool isValid() const { return fIsValid; }
+
+ // Returns true if both textures are valid and refer to the same API texture.
+ bool isSameTexture(const GrBackendTexture&);
+
+#if GR_TEST_UTILS
+ static bool TestingOnly_Equals(const GrBackendTexture& , const GrBackendTexture&);
+#endif
+
+private:
+ friend class GrVkGpu; // for getMutableState
+ sk_sp<skgpu::MutableTextureStateRef> getMutableState() const;
+
+#ifdef SK_GL
+ friend class GrGLTexture;
+ friend class GrGLGpu; // for getGLTextureParams
+ GrBackendTexture(int width,
+ int height,
+ GrMipmapped,
+ const GrGLTextureInfo,
+ sk_sp<GrGLTextureParameters>,
+ std::string_view label = {});
+ sk_sp<GrGLTextureParameters> getGLTextureParams() const;
+#endif
+
+#ifdef SK_VULKAN
+ friend class GrVkTexture;
+ GrBackendTexture(int width,
+ int height,
+ const GrVkImageInfo& vkInfo,
+ sk_sp<skgpu::MutableTextureStateRef> mutableState,
+ std::string_view label = {});
+#endif
+
+#ifdef SK_DIRECT3D
+ friend class GrD3DTexture;
+ friend class GrD3DGpu; // for getGrD3DResourceState
+ GrBackendTexture(int width,
+ int height,
+ const GrD3DTextureResourceInfo& vkInfo,
+ sk_sp<GrD3DResourceState> state,
+ std::string_view label = {});
+ sk_sp<GrD3DResourceState> getGrD3DResourceState() const;
+#endif
+
+ // Free and release any resources being held by the GrBackendTexture.
+ void cleanup();
+
+ bool fIsValid;
+ int fWidth; //!< width in pixels
+ int fHeight; //!< height in pixels
+ const std::string fLabel;
+ GrMipmapped fMipmapped;
+ GrBackendApi fBackend;
+ GrTextureType fTextureType;
+
+ union {
+#ifdef SK_GL
+ GrGLBackendTextureInfo fGLInfo;
+#endif
+#ifdef SK_VULKAN
+ GrVkBackendSurfaceInfo fVkInfo;
+#endif
+ GrMockTextureInfo fMockInfo;
+#ifdef SK_DIRECT3D
+ GrD3DBackendSurfaceInfo fD3DInfo;
+#endif
+ };
+#ifdef SK_METAL
+ GrMtlTextureInfo fMtlInfo;
+#endif
+#ifdef SK_DAWN
+ GrDawnTextureInfo fDawnInfo;
+#endif
+
+ sk_sp<skgpu::MutableTextureStateRef> fMutableState;
+};
+
+class SK_API GrBackendRenderTarget {
+public:
+ // Creates an invalid backend render target.
+ GrBackendRenderTarget();
+
+#ifdef SK_GL
+ // The GrGLFramebufferInfo must have a valid fFormat. If wrapping in an SkSurface we require the
+ // stencil bits to be either 0, 8 or 16.
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrGLFramebufferInfo& glInfo);
+#endif
+
+#ifdef SK_DAWN
+ // If wrapping in an SkSurface we require the stencil bits to be either 0, 8 or 16.
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrDawnRenderTargetInfo& dawnInfo);
+#endif
+
+#ifdef SK_VULKAN
+ /** Deprecated. Sample count is now part of GrVkImageInfo. */
+ GrBackendRenderTarget(int width, int height, int sampleCnt, const GrVkImageInfo& vkInfo);
+
+ GrBackendRenderTarget(int width, int height, const GrVkImageInfo& vkInfo);
+#endif
+
+#ifdef SK_METAL
+ GrBackendRenderTarget(int width,
+ int height,
+ const GrMtlTextureInfo& mtlInfo);
+ /** Deprecated. Sample count is ignored and is instead retrieved from the MtlTexture. */
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ const GrMtlTextureInfo& mtlInfo);
+#endif
+
+#ifdef SK_DIRECT3D
+ GrBackendRenderTarget(int width,
+ int height,
+ const GrD3DTextureResourceInfo& d3dInfo);
+#endif
+
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrMockRenderTargetInfo& mockInfo);
+
+ ~GrBackendRenderTarget();
+
+ GrBackendRenderTarget(const GrBackendRenderTarget& that);
+ GrBackendRenderTarget& operator=(const GrBackendRenderTarget&);
+
+ SkISize dimensions() const { return {fWidth, fHeight}; }
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ int sampleCnt() const { return fSampleCnt; }
+ int stencilBits() const { return fStencilBits; }
+ GrBackendApi backend() const { return fBackend; }
+ bool isFramebufferOnly() const { return fFramebufferOnly; }
+
+#ifdef SK_GL
+ // If the backend API is GL, copies a snapshot of the GrGLFramebufferInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not GL.
+ bool getGLFramebufferInfo(GrGLFramebufferInfo*) const;
+#endif
+
+#ifdef SK_DAWN
+ // If the backend API is Dawn, copies a snapshot of the GrDawnRenderTargetInfo struct into the
+ // passed-in pointer and returns true. Otherwise returns false if the backend API is not Dawn.
+ bool getDawnRenderTargetInfo(GrDawnRenderTargetInfo*) const;
+#endif
+
+#ifdef SK_VULKAN
+ // If the backend API is Vulkan, copies a snapshot of the GrVkImageInfo struct into the passed
+ // in pointer and returns true. This snapshot will set the fImageLayout to the current layout
+ // state. Otherwise returns false if the backend API is not Vulkan.
+ bool getVkImageInfo(GrVkImageInfo*) const;
+
+ // Anytime the client changes the VkImageLayout of the VkImage captured by this
+ // GrBackendRenderTarget, they must call this function to notify Skia of the changed layout.
+ void setVkImageLayout(VkImageLayout);
+#endif
+
+#ifdef SK_METAL
+ // If the backend API is Metal, copies a snapshot of the GrMtlTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Metal.
+ bool getMtlTextureInfo(GrMtlTextureInfo*) const;
+#endif
+
+#ifdef SK_DIRECT3D
+ // If the backend API is Direct3D, copies a snapshot of the GrD3DTextureResourceInfo struct into
+ // the passed in pointer and returns true. Otherwise returns false if the backend API is not D3D.
+ bool getD3DTextureResourceInfo(GrD3DTextureResourceInfo*) const;
+
+ // Anytime the client changes the D3D12_RESOURCE_STATES of the D3D12_RESOURCE captured by this
+ // GrBackendRenderTarget, they must call this function to notify Skia of the changed state.
+ void setD3DResourceState(GrD3DResourceStateEnum);
+#endif
+
+ // Get the GrBackendFormat for this render target (or an invalid format if this is not valid).
+ GrBackendFormat getBackendFormat() const;
+
+ // If the backend API is Mock, copies a snapshot of the GrMockRenderTargetInfo struct into the
+ // passed in pointer and returns true. Otherwise returns false if the backend API is not Mock.
+ bool getMockRenderTargetInfo(GrMockRenderTargetInfo*) const;
+
+ // If the client changes any of the mutable backend state of the GrBackendRenderTarget they should
+ // call this function to inform Skia that those values have changed. The backend API specific state
+ // that can be set from this function are:
+ //
+ // Vulkan: VkImageLayout and QueueFamilyIndex
+ void setMutableState(const skgpu::MutableTextureState&);
+
+ // Returns true if we are working with protected content.
+ bool isProtected() const;
+
+ // Returns true if the backend render target has been initialized.
+ bool isValid() const { return fIsValid; }
+
+
+#if GR_TEST_UTILS
+ static bool TestingOnly_Equals(const GrBackendRenderTarget&, const GrBackendRenderTarget&);
+#endif
+
+private:
+ friend class GrVkGpu; // for getMutableState
+ sk_sp<skgpu::MutableTextureStateRef> getMutableState() const;
+
+#ifdef SK_VULKAN
+ friend class GrVkRenderTarget;
+ GrBackendRenderTarget(int width,
+ int height,
+ const GrVkImageInfo& vkInfo,
+ sk_sp<skgpu::MutableTextureStateRef> mutableState);
+#endif
+
+#ifdef SK_DIRECT3D
+ friend class GrD3DGpu;
+ friend class GrD3DRenderTarget;
+ GrBackendRenderTarget(int width,
+ int height,
+ const GrD3DTextureResourceInfo& d3dInfo,
+ sk_sp<GrD3DResourceState> state);
+ sk_sp<GrD3DResourceState> getGrD3DResourceState() const;
+#endif
+
+ // Free and release any resources being held by the GrBackendRenderTarget.
+ void cleanup();
+
+ bool fIsValid;
+ bool fFramebufferOnly = false;
+ int fWidth; //!< width in pixels
+ int fHeight; //!< height in pixels
+
+ int fSampleCnt;
+ int fStencilBits;
+
+ GrBackendApi fBackend;
+
+ union {
+#ifdef SK_GL
+ GrGLFramebufferInfo fGLInfo;
+#endif
+#ifdef SK_VULKAN
+ GrVkBackendSurfaceInfo fVkInfo;
+#endif
+ GrMockRenderTargetInfo fMockInfo;
+#ifdef SK_DIRECT3D
+ GrD3DBackendSurfaceInfo fD3DInfo;
+#endif
+ };
+#ifdef SK_METAL
+ GrMtlTextureInfo fMtlInfo;
+#endif
+#ifdef SK_DAWN
+ GrDawnRenderTargetInfo fDawnInfo;
+#endif
+ sk_sp<skgpu::MutableTextureStateRef> fMutableState;
+};
+
+#endif
+
+#endif
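
Usage sketch (hypothetical, GL-only, not part of this patch): wrapping an existing GL texture id; the GL enums are spelled as hex literals here since the GR_GL_* defines live in private headers:

    #ifdef SK_GL
    #include "include/gpu/GrBackendSurface.h"
    #include "include/gpu/gl/GrGLTypes.h"

    GrBackendTexture wrapGLTexture(GrGLuint texID, int width, int height) {
        GrGLTextureInfo glInfo;
        glInfo.fTarget = 0x0DE1;  // GL_TEXTURE_2D
        glInfo.fID     = texID;
        glInfo.fFormat = 0x8058;  // GL_RGBA8 -- a valid sized fFormat is required (see above)
        return GrBackendTexture(width, height, GrMipmapped::kNo, glInfo);
    }
    #endif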
diff --git a/gfx/skia/skia/include/gpu/GrBackendSurfaceMutableState.h b/gfx/skia/skia/include/gpu/GrBackendSurfaceMutableState.h
new file mode 100644
index 0000000000..cbf27bf7e5
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBackendSurfaceMutableState.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBackendSurfaceMutableState_DEFINED
+#define GrBackendSurfaceMutableState_DEFINED
+
+#include "include/gpu/MutableTextureState.h"
+
+class GrBackendSurfaceMutableState : public skgpu::MutableTextureState {
+public:
+ GrBackendSurfaceMutableState() = default;
+
+#ifdef SK_VULKAN
+ GrBackendSurfaceMutableState(VkImageLayout layout, uint32_t queueFamilyIndex)
+ : skgpu::MutableTextureState(layout, queueFamilyIndex) {}
+#endif
+
+ GrBackendSurfaceMutableState(const GrBackendSurfaceMutableState& that)
+ : skgpu::MutableTextureState(that) {}
+};
+
+#endif
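
Usage sketch (hypothetical, Vulkan-only, not part of this patch): the alias is used to report an externally performed layout transition back to Skia:

    #ifdef SK_VULKAN
    #include "include/gpu/GrBackendSurface.h"
    #include "include/gpu/GrBackendSurfaceMutableState.h"

    // Inform Skia that the wrapped VkImage is now in present layout.
    void notifyPresentLayout(GrBackendTexture& tex) {
        GrBackendSurfaceMutableState state(VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
                                           VK_QUEUE_FAMILY_IGNORED);
        tex.setMutableState(state);
    }
    #endif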
diff --git a/gfx/skia/skia/include/gpu/GrContextOptions.h b/gfx/skia/skia/include/gpu/GrContextOptions.h
new file mode 100644
index 0000000000..bf4ca409a8
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrContextOptions.h
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContextOptions_DEFINED
+#define GrContextOptions_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/gpu/GrDriverBugWorkarounds.h"
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/ShaderErrorHandler.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+
+#include <vector>
+
+class SkExecutor;
+
+#if defined(SK_GANESH)
+struct SK_API GrContextOptions {
+ enum class Enable {
+ /** Forces an option to be disabled. */
+ kNo,
+ /** Forces an option to be enabled. */
+ kYes,
+ /**
+ * Uses Skia's default behavior, which may use runtime properties (e.g. driver version).
+ */
+ kDefault
+ };
+
+ enum class ShaderCacheStrategy {
+ kSkSL,
+ kBackendSource,
+ kBackendBinary,
+ };
+
+ /**
+ * Abstract class which stores Skia data in a cache that persists between sessions. Currently,
+ * Skia stores compiled shader binaries (only when glProgramBinary / glGetProgramBinary are
+ * supported) when provided a persistent cache, but this may extend to other data in the future.
+ */
+ class SK_API PersistentCache {
+ public:
+ virtual ~PersistentCache() = default;
+
+ /**
+ * Returns the data for the key if it exists in the cache, otherwise returns null.
+ */
+ virtual sk_sp<SkData> load(const SkData& key) = 0;
+
+ // Placeholder until all clients override the 3-parameter store(), then remove this, and
+ // make that version pure virtual.
+ virtual void store(const SkData& /*key*/, const SkData& /*data*/) { SkASSERT(false); }
+
+ /**
+ * Stores data in the cache, indexed by key. description provides a human-readable
+ * version of the key.
+ */
+ virtual void store(const SkData& key, const SkData& data, const SkString& /*description*/) {
+ this->store(key, data);
+ }
+
+ protected:
+ PersistentCache() = default;
+ PersistentCache(const PersistentCache&) = delete;
+ PersistentCache& operator=(const PersistentCache&) = delete;
+ };
+
+ using ShaderErrorHandler = skgpu::ShaderErrorHandler;
+
+ GrContextOptions() {}
+
+ // Suppress prints for the GrContext.
+ bool fSuppressPrints = false;
+
+ /**
+ * Controls whether we check for GL errors after functions that allocate resources (e.g.
+ * glTexImage2D), at the end of a GPU submission, or when checking framebuffer completeness. The
+ * results of shader compilation and program linking are always checked, regardless of this
+ * option. Ignored on backends other than GL.
+ */
+ Enable fSkipGLErrorChecks = Enable::kDefault;
+
+ /** Overrides: These options override feature detection using backend API queries. These
+ overrides can only reduce the feature set or limits, never increase them beyond the
+ detected values. */
+
+ int fMaxTextureSizeOverride = SK_MaxS32;
+
+ /** The threshold in bytes above which we will use a buffer mapping API to map vertex and index
+ buffers to CPU memory in order to update them. A value of -1 means the GrContext should
+ deduce the optimal value for this platform. */
+ int fBufferMapThreshold = -1;
+
+ /**
+ * Executor to handle threaded work within Ganesh. If this is nullptr, then all work will be
+ * done serially on the main thread. To have worker threads assist with various tasks, set this
+ * to a valid SkExecutor instance. Currently, used for software path rendering, but may be used
+ * for other tasks.
+ */
+ SkExecutor* fExecutor = nullptr;
+
+ /** Construct mipmaps manually, via repeated downsampling draw-calls. This is used when
+ the driver's implementation (glGenerateMipmap) contains bugs. This requires mipmap
+ level control (ie desktop or ES3). */
+ bool fDoManualMipmapping = false;
+
+ /**
+ * Disables the use of coverage counting shortcuts to render paths. Coverage counting can cause
+ * artifacts along shared edges if care isn't taken to ensure both contours wind in the same
+ * direction.
+ */
+ // FIXME: Once this is removed from Chrome and Android, rename to fEnable"".
+ bool fDisableCoverageCountingPaths = true;
+
+ /**
+ * Disables distance field rendering for paths. Distance field computation can be expensive,
+ * and yields no benefit if a path is not rendered multiple times with different transforms.
+ */
+ bool fDisableDistanceFieldPaths = false;
+
+ /**
+ * If true this allows path mask textures to be cached. This is only really useful if paths
+ * are commonly rendered at the same scale and fractional translation.
+ */
+ bool fAllowPathMaskCaching = true;
+
+ /**
+ * If true, the GPU will not be used to perform YUV -> RGB conversion when generating
+ * textures from codec-backed images.
+ */
+ bool fDisableGpuYUVConversion = false;
+
+ /**
+ * The maximum size of cache textures used for Skia's Glyph cache.
+ */
+ size_t fGlyphCacheTextureMaximumBytes = 2048 * 1024 * 4;
+
+ /**
+ * Below this threshold size in device space distance field fonts won't be used. Distance field
+ * fonts don't support hinting which is more important at smaller sizes.
+ */
+ float fMinDistanceFieldFontSize = 18;
+
+ /**
+ * Above this threshold size in device space glyphs are drawn as individual paths.
+ */
+#if defined(SK_BUILD_FOR_ANDROID)
+ float fGlyphsAsPathsFontSize = 384;
+#elif defined(SK_BUILD_FOR_MAC)
+ float fGlyphsAsPathsFontSize = 256;
+#else
+ float fGlyphsAsPathsFontSize = 324;
+#endif
+
+ /**
+ * Can the glyph atlas use multiple textures? If allowed, each texture's size is bound by
+ * fGlyphCacheTextureMaximumBytes.
+ */
+ Enable fAllowMultipleGlyphCacheTextures = Enable::kDefault;
+
+ /**
+ * Bugs on certain drivers cause stencil buffers to leak. This flag causes Skia to avoid
+ * allocating stencil buffers and use alternate rasterization paths, avoiding the leak.
+ */
+ bool fAvoidStencilBuffers = false;
+
+ /**
+ * Enables driver workaround to use draws instead of HW clears, e.g. glClear on the GL backend.
+ */
+ Enable fUseDrawInsteadOfClear = Enable::kDefault;
+
+ /**
+ * Allow Ganesh to more aggressively reorder operations to reduce the number of render passes.
+ * Offscreen draws will be done upfront instead of interrupting the main render pass when
+ * possible. May increase VRAM usage, but still observes the resource cache limit.
+ * Enabled by default.
+ */
+ Enable fReduceOpsTaskSplitting = Enable::kDefault;
+
+ /**
+ * Some ES3 contexts report the ES2 external image extension, but not the ES3 version.
+ * If support for external images is critical, enabling this option will cause Ganesh to limit
+ * shaders to the ES2 shading language in that situation.
+ */
+ bool fPreferExternalImagesOverES3 = false;
+
+ /**
+ * Disables correctness workarounds that are enabled for particular GPUs, OSes, or drivers.
+ * This does not affect code path choices that are made for performance reasons nor does it
+ * override other GrContextOption settings.
+ */
+ bool fDisableDriverCorrectnessWorkarounds = false;
+
+ /**
+ * Maximum number of GPU programs or pipelines to keep active in the runtime cache.
+ */
+ int fRuntimeProgramCacheSize = 256;
+
+ /**
+ * Cache in which to store compiled shader binaries between runs.
+ */
+ PersistentCache* fPersistentCache = nullptr;
+
+ /**
+ * This affects the usage of the PersistentCache. We can cache SkSL, backend source (GLSL), or
+ * backend binaries (GL program binaries). By default we cache binaries, but if the driver's
+ * binary loading/storing is believed to have bugs, this can be limited to caching GLSL.
+ * Caching GLSL strings still saves CPU work when a GL program is created.
+ */
+ ShaderCacheStrategy fShaderCacheStrategy = ShaderCacheStrategy::kBackendBinary;
+
+ /**
+ * If present, use this object to report shader compilation failures. If not, report failures
+ * via SkDebugf and assert.
+ */
+ ShaderErrorHandler* fShaderErrorHandler = nullptr;
+
+ /**
+ * Specifies the number of samples Ganesh should use when performing internal draws with MSAA
+ * (hardware capabilities permitting).
+ *
+ * If 0, Ganesh will disable internal code paths that use multisampling.
+ */
+ int fInternalMultisampleCount = 4;
+
+ /**
+ * In Skia's vulkan backend a single GrContext submit equates to the submission of a single
+ * primary command buffer to the VkQueue. This value specifies how many vulkan secondary command
+ * buffers we will cache for reuse on a given primary command buffer. A single submit may use
+ * more than this many secondary command buffers, but after the primary command buffer is
+ * finished on the GPU it will only hold on to this many secondary command buffers for reuse.
+ *
+ * A value of -1 means we will pick a limit value internally.
+ */
+ int fMaxCachedVulkanSecondaryCommandBuffers = -1;
+
+ /**
+ * If true, the caps will never support mipmaps.
+ */
+ bool fSuppressMipmapSupport = false;
+
+ /**
+ * If true, the TessellationPathRenderer will not be used for path rendering.
+ * If false, will fallback to any driver workarounds, if set.
+ */
+ bool fDisableTessellationPathRenderer = false;
+
+ /**
+ * If true, and if supported, enables hardware tessellation in the caps.
+ * DEPRECATED: This value is ignored; experimental hardware tessellation is always disabled.
+ */
+ bool fEnableExperimentalHardwareTessellation = false;
+
+ /**
+ * If true, then add 1 pixel padding to all glyph masks in the atlas to support bi-lerp
+ * rendering of all glyphs. This must be set to true to use Slugs.
+ */
+ bool fSupportBilerpFromGlyphAtlas = false;
+
+ /**
+ * Uses a reduced variety of shaders. May perform less optimally in steady state but can reduce
+ * jank due to shader compilations.
+ */
+ bool fReducedShaderVariations = false;
+
+ /**
+ * If true, then allow to enable MSAA on new Intel GPUs.
+ */
+ bool fAllowMSAAOnNewIntel = false;
+
+ /**
+ * Currently on ARM Android we disable the use of GL TexStorage because of memory regressions.
+ * However, some clients may still want to use TexStorage. For example, TexStorage support is
+ * required for creating protected textures.
+ *
+ * This flag has no impact on non GL backends.
+ */
+ bool fAlwaysUseTexStorageWhenAvailable = false;
+
+ /**
+ * Optional callback that can be passed into the GrDirectContext which will be called when the
+ * GrDirectContext is about to be destroyed. When this call is made, it will be safe for the
+ * client to delete the GPU backend context that is backing the GrDirectContext. The
+ * GrDirectContextDestroyedContext will be passed back to the client in the callback.
+ */
+ GrDirectContextDestroyedContext fContextDeleteContext = nullptr;
+ GrDirectContextDestroyedProc fContextDeleteProc = nullptr;
+
+#if GR_TEST_UTILS
+ /**
+ * Private options that are only meant for testing within Skia's tools.
+ */
+
+ /**
+ * Testing-only mode to exercise allocation failures in the flush-time callback objects.
+ * For now it only simulates allocation failure during the preFlush callback.
+ */
+ bool fFailFlushTimeCallbacks = false;
+
+ /**
+ * Prevents use of dual source blending, to test that all xfer modes work correctly without it.
+ */
+ bool fSuppressDualSourceBlending = false;
+
+ /**
+ * Prevents the use of non-coefficient-based blend equations, for testing dst reads, barriers,
+ * and in-shader blending.
+ */
+ bool fSuppressAdvancedBlendEquations = false;
+
+ /**
+ * Prevents the use of framebuffer fetches, for testing dst reads and texture barriers.
+ */
+ bool fSuppressFramebufferFetch = false;
+
+ /**
+ * If true, then all paths are processed as if "setIsVolatile" had been called.
+ */
+ bool fAllPathsVolatile = false;
+
+ /**
+ * Render everything in wireframe
+ */
+ bool fWireframeMode = false;
+
+ /**
+ * Enforces clearing of all textures when they're created.
+ */
+ bool fClearAllTextures = false;
+
+ /**
+ * Randomly generate a (false) GL_OUT_OF_MEMORY error
+ */
+ bool fRandomGLOOM = false;
+
+ /**
+ * Force off support for write/transfer pixels row bytes in caps.
+ */
+ bool fDisallowWriteAndTransferPixelRowBytes = false;
+
+ /**
+ * Include or exclude specific GPU path renderers.
+ */
+ GpuPathRenderers fGpuPathRenderers = GpuPathRenderers::kDefault;
+
+ /**
+ * Specify the GPU resource cache limit. Equivalent to calling `setResourceCacheLimit` on the
+ * context at construction time.
+ *
+ * A value of -1 means use the default limit value.
+ */
+ int fResourceCacheLimitOverride = -1;
+
+ /**
+ * Maximum width and height of internal texture atlases.
+ */
+ int fMaxTextureAtlasSize = 2048;
+#endif
+
+ GrDriverBugWorkarounds fDriverBugWorkarounds;
+};
+#else
+struct GrContextOptions {
+ struct PersistentCache {};
+};
+#endif
+
+#endif
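
Usage sketch (hypothetical, not part of this patch): options are filled in before context creation and are immutable afterwards; field semantics are documented above:

    #include "include/gpu/GrContextOptions.h"

    GrContextOptions makeConservativeOptions() {
        GrContextOptions options;
        options.fSuppressPrints = true;         // quiet Ganesh debug printing
        options.fInternalMultisampleCount = 0;  // disable internal MSAA code paths
        options.fBufferMapThreshold = 1 << 16;  // only map vertex/index buffers above 64 KiB
        return options;
    }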
diff --git a/gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h b/gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h
new file mode 100644
index 0000000000..eb75555364
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContextThreadSafeProxy_DEFINED
+#define GrContextThreadSafeProxy_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+#if defined(SK_GANESH)
+
+#include "include/core/SkImageInfo.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/gpu/GrTypes.h"
+
+#include <atomic>
+
+class GrBackendFormat;
+class GrCaps;
+class GrContextThreadSafeProxyPriv;
+class GrThreadSafeCache;
+class GrThreadSafePipelineBuilder;
+class SkSurfaceCharacterization;
+class SkSurfaceProps;
+enum class SkTextureCompressionType;
+
+namespace sktext::gpu { class TextBlobRedrawCoordinator; }
+
+/**
+ * Can be used to perform actions related to the generating GrContext in a thread safe manner. The
+ * proxy does not access the 3D API (e.g. OpenGL) that backs the generating GrContext.
+ */
+class SK_API GrContextThreadSafeProxy final : public SkNVRefCnt<GrContextThreadSafeProxy> {
+public:
+ ~GrContextThreadSafeProxy();
+
+ /**
+ * Create a surface characterization for a DDL that will be replayed into the GrContext
+ * that created this proxy. On failure the resulting characterization will be invalid (i.e.,
+ * "!c.isValid()").
+ *
+ * @param cacheMaxResourceBytes The max resource bytes limit that will be in effect
+ * when the DDL created with this characterization is
+ * replayed.
+ * Note: the contract here is that the DDL will be
+ * created as if it had a full 'cacheMaxResourceBytes'
+ * to use. If replayed into a GrContext that already has
+ * locked GPU memory, the replay can exceed the budget.
+ * To rephrase, all resource allocation decisions are
+ * made at record time and at playback time the budget
+ * limits will be ignored.
+ * @param ii The image info specifying properties of the SkSurface
+ * that the DDL created with this characterization will
+ * be replayed into.
+ * Note: Ganesh doesn't make use of the SkImageInfo's
+ * alphaType.
+ * @param backendFormat Information about the format of the GPU surface that
+ * will back the SkSurface upon replay
+ * @param sampleCount The sample count of the SkSurface that the DDL
+ * created with this characterization will be replayed
+ * into
+ * @param origin The origin of the SkSurface that the DDL created with
+ * this characterization will be replayed into
+ * @param surfaceProps The surface properties of the SkSurface that the DDL
+ * created with this characterization will be replayed
+ * into
+ * @param isMipMapped Will the surface the DDL will be replayed into have
+ * space allocated for mipmaps?
+ * @param willUseGLFBO0 Will the surface the DDL will be replayed into be
+ * backed by GL FBO 0. This flag is only valid if using
+ * an GL backend.
+ * @param isTextureable Will the surface be able to act as a texture?
+ * @param isProtected Will the (Vulkan) surface be DRM protected?
+ * @param vkRTSupportsInputAttachment Can the Vulkan surface be used as an input
+ attachment?
+ * @param forVulkanSecondaryCommandBuffer Will the surface be wrapping a vulkan secondary
+ * command buffer via a GrVkSecondaryCBDrawContext? If
+ * this is true then the following is required:
+ * isTextureable = false
+ * isMipMapped = false
+ * willUseGLFBO0 = false
+ * vkRTSupportsInputAttachment = false
+ */
+ SkSurfaceCharacterization createCharacterization(
+ size_t cacheMaxResourceBytes,
+ const SkImageInfo& ii,
+ const GrBackendFormat& backendFormat,
+ int sampleCount,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps& surfaceProps,
+ bool isMipMapped,
+ bool willUseGLFBO0 = false,
+ bool isTextureable = true,
+ GrProtected isProtected = GrProtected::kNo,
+ bool vkRTSupportsInputAttachment = false,
+ bool forVulkanSecondaryCommandBuffer = false);
+
+ /*
+ * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
+ * It is guaranteed that this backend format will be the one used by the following
+ * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ GrBackendFormat defaultBackendFormat(SkColorType ct, GrRenderable renderable) const;
+
+ /**
+ * Retrieve the GrBackendFormat for a given SkTextureCompressionType. This is
+ * guaranteed to match the backend format used by the following
+ * createCompressedBackendTexture methods that take a CompressionType.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ GrBackendFormat compressedBackendFormat(SkTextureCompressionType c) const;
+
+ /**
+ * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
+ * rendering is supported for the color type. 0 is returned if rendering to this color type
+ * is not supported at all.
+ */
+ int maxSurfaceSampleCountForColorType(SkColorType colorType) const;
+
+ bool isValid() const { return nullptr != fCaps; }
+
+ bool operator==(const GrContextThreadSafeProxy& that) const {
+ // Each GrContext should only ever have a single thread-safe proxy.
+ SkASSERT((this == &that) == (this->fContextID == that.fContextID));
+ return this == &that;
+ }
+
+ bool operator!=(const GrContextThreadSafeProxy& that) const { return !(*this == that); }
+
+ // Provides access to functions that aren't part of the public API.
+ GrContextThreadSafeProxyPriv priv();
+ const GrContextThreadSafeProxyPriv priv() const; // NOLINT(readability-const-return-type)
+
+private:
+ friend class GrContextThreadSafeProxyPriv; // for ctor and hidden methods
+
+ // DDL TODO: need to add unit tests for backend & maybe options
+ GrContextThreadSafeProxy(GrBackendApi, const GrContextOptions&);
+
+ void abandonContext();
+ bool abandoned() const;
+
+ // TODO: This should be part of the constructor but right now we have a chicken-and-egg problem
+ // with GrContext where we get the caps by creating a GPU which requires a context (see the
+ // `init` method on GrContext_Base).
+ void init(sk_sp<const GrCaps>, sk_sp<GrThreadSafePipelineBuilder>);
+
+ const GrBackendApi fBackend;
+ const GrContextOptions fOptions;
+ const uint32_t fContextID;
+ sk_sp<const GrCaps> fCaps;
+ std::unique_ptr<sktext::gpu::TextBlobRedrawCoordinator> fTextBlobRedrawCoordinator;
+ std::unique_ptr<GrThreadSafeCache> fThreadSafeCache;
+ sk_sp<GrThreadSafePipelineBuilder> fPipelineBuilder;
+ std::atomic<bool> fAbandoned{false};
+};
+
+#else // !defined(SK_GANESH)
+class SK_API GrContextThreadSafeProxy final : public SkNVRefCnt<GrContextThreadSafeProxy> {};
+#endif
+
+#endif
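
Usage sketch (hypothetical, not part of this patch): the proxy can answer capability queries off the main thread without touching the 3D API:

    #if defined(SK_GANESH)
    #include "include/gpu/GrContextThreadSafeProxy.h"

    bool supportsMsaaRGBA(const GrContextThreadSafeProxy& proxy) {
        // 0 = not renderable, 1 = non-MSAA only, >1 = MSAA available.
        return proxy.maxSurfaceSampleCountForColorType(kRGBA_8888_SkColorType) > 1;
    }
    #endif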
diff --git a/gfx/skia/skia/include/gpu/GrDirectContext.h b/gfx/skia/skia/include/gpu/GrDirectContext.h
new file mode 100644
index 0000000000..05c8099d3d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrDirectContext.h
@@ -0,0 +1,908 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDirectContext_DEFINED
+#define GrDirectContext_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrTypes.h"
+
+#include <chrono>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string_view>
+
+class GrAtlasManager;
+class GrBackendSemaphore;
+class GrBackendFormat;
+class GrBackendTexture;
+class GrBackendRenderTarget;
+class GrClientMappedBufferManager;
+class GrContextThreadSafeProxy;
+class GrDirectContextPriv;
+class GrGpu;
+class GrResourceCache;
+class GrResourceProvider;
+class SkData;
+class SkPixmap;
+class SkTaskGroup;
+class SkTraceMemoryDump;
+enum SkColorType : int;
+enum class SkTextureCompressionType;
+struct GrGLInterface;
+struct GrMockOptions;
+struct GrVkBackendContext; // IWYU pragma: keep
+struct GrD3DBackendContext; // IWYU pragma: keep
+struct GrMtlBackendContext; // IWYU pragma: keep
+
+namespace skgpu {
+ class MutableTextureState;
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+ namespace ganesh { class SmallPathAtlasMgr; }
+#endif
+}
+namespace sktext { namespace gpu { class StrikeCache; } }
+namespace wgpu { class Device; } // IWYU pragma: keep
+
+class SK_API GrDirectContext : public GrRecordingContext {
+public:
+#ifdef SK_GL
+ /**
+ * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided then the
+ * result of GrGLMakeNativeInterface() is used if it succeeds.
+ */
+ static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>);
+ static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeGL();
+#endif
+
+#ifdef SK_VULKAN
+ /**
+ * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned
+ * GrDirectContext is destroyed. This also means that any objects created with this
+ * GrDirectContext (e.g. SkSurfaces, SkImages, etc.) must also be released as they may hold
+ * refs on the GrDirectContext. Once all these objects and the GrDirectContext are released,
+ * then it is safe to delete the vulkan objects.
+ */
+ static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&);
+#endif
+
+#ifdef SK_METAL
+ /**
+ * Makes a GrDirectContext which uses Metal as the backend. The GrMtlBackendContext contains a
+ * MTLDevice and MTLCommandQueue which should be used by the backend. These objects must
+ * have their own ref which will be released when the GrMtlBackendContext is destroyed.
+ * Ganesh will take its own ref on the objects which will be released when the GrDirectContext
+ * is destroyed.
+ */
+ static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&);
+ /**
+ * Deprecated.
+ *
+ * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an
+ * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These objects
+ * must have a ref on them that can be transferred to Ganesh, which will release the ref
+ * when the GrDirectContext is destroyed.
+ */
+ static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue);
+#endif
+
+#ifdef SK_DIRECT3D
+ /**
+ * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
+ * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
+ */
+ static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
+#endif
+
+#ifdef SK_DAWN
+ static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&,
+ const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&);
+#endif
+
+ static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);
+
+ ~GrDirectContext() override;
+
+ /**
+ * The context normally assumes that no outsider is setting state
+ * within the underlying 3D API's context/device/whatever. This call informs
+ * the context that the state was modified and it should resend. Shouldn't
+ * be called frequently for good performance.
+ * The flag bits, state, is dependent on which backend is used by the
+ * context, either GL or D3D (possible in future).
+ */
+ void resetContext(uint32_t state = kAll_GrBackendState);
+
+ /**
+ * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
+ * the context has modified the bound texture will have texture id 0 bound. This does not
+ * flush the context. Calling resetContext() does not change the set that will be bound
+ * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
+ * all unit/target combinations are considered to have unmodified bindings until the context
+ * subsequently modifies them (meaning if this is called twice in a row with no intervening
+ * context usage then the second call is a no-op.)
+ */
+ void resetGLTextureBindings();
+
+ /**
+ * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
+ * usable. Call this if you have lost the associated GPU context, and thus internal texture,
+ * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
+ * context and any of its created resource objects will not make backend 3D API calls. Content
+ * rendered but not previously flushed may be lost. After this function is called all subsequent
+ * calls on the context will fail or be no-ops.
+ *
+ * The typical use case for this function is that the underlying 3D context was lost and further
+ * API calls may crash.
+ *
+ * This call is not valid to be made inside ReleaseProcs passed into SkSurface or SkImages. The
+ * call will simply fail (and assert in debug) if it is called while inside a ReleaseProc.
+ *
+ * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
+ * create the context must be kept alive even after abandoning the context. Those objects must
+ * live for the lifetime of the context object itself. The reason for this is so that
+ * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
+ * cleaned up even in a device lost state.
+ */
+ void abandonContext() override;
+
+ /**
+     * Returns true if the context was abandoned, or if the backend-specific context has gotten
+     * into an unrecoverable, lost state (e.g. in the Vulkan backend if we've gotten a
+ * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
+ * context.
+ */
+ bool abandoned() override;
+
+ // TODO: Remove this from public after migrating Chrome.
+ sk_sp<GrContextThreadSafeProxy> threadSafeProxy();
+
+ /**
+ * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is
+ * reset and will return false until another out-of-memory error is reported by the 3D API. If
+ * the context is abandoned then this will report false.
+ *
+ * Currently this is implemented for:
+ *
+ * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and
+ * therefore hide the error from Skia. Also, it is not advised to use this in combination with
+ * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever
+ * checking the GL context for OOM.
+ *
+ * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
+ * occurred.
+ */
+ bool oomed();
+
+ /**
+     * This is similar to abandonContext(); however, the underlying 3D context is not yet lost
+     * and the context will clean up all allocated resources before returning. After returning,
+     * it will assume that the underlying context may no longer be valid.
+ *
+ * The typical use case for this function is that the client is going to destroy the 3D context
+ * but can't guarantee that context will be destroyed first (perhaps because it may be ref'ed
+ * elsewhere by either the client or Skia objects).
+ *
+ * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
+ * create the context must be alive before calling releaseResourcesAndAbandonContext.
+ */
+ void releaseResourcesAndAbandonContext();
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Resource Cache
+
+ /** DEPRECATED
+ * Return the current GPU resource cache limits.
+ *
+ * @param maxResources If non-null, will be set to -1.
+ * @param maxResourceBytes If non-null, returns maximum number of bytes of
+ * video memory that can be held in the cache.
+ */
+ void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;
+
+ /**
+ * Return the current GPU resource cache limit in bytes.
+ */
+ size_t getResourceCacheLimit() const;
+
+ /**
+ * Gets the current GPU resource cache usage.
+ *
+ * @param resourceCount If non-null, returns the number of resources that are held in the
+ * cache.
+ * @param maxResourceBytes If non-null, returns the total number of bytes of video memory held
+ * in the cache.
+ */
+ void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;
+
+ /**
+ * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
+ */
+ size_t getResourceCachePurgeableBytes() const;
+
+ /** DEPRECATED
+ * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
+ * limit, it will be purged (LRU) to keep the cache within the limit.
+ *
+ * @param maxResources Unused.
+ * @param maxResourceBytes The maximum number of bytes of video memory
+ * that can be held in the cache.
+ */
+ void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
+
+ /**
+ * Specify the GPU resource cache limit. If the cache currently exceeds this limit,
+ * it will be purged (LRU) to keep the cache within the limit.
+ *
+ * @param maxResourceBytes The maximum number of bytes of video memory
+ * that can be held in the cache.
+ */
+ void setResourceCacheLimit(size_t maxResourceBytes);
+
+ /**
+     * Frees GPU resources created by the context. Can be called to reduce GPU memory
+ * pressure.
+ */
+ void freeGpuResources();
+
+ /**
+ * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
+ * otherwise marked for deletion, regardless of whether the context is under budget.
+ *
+ * If 'scratchResourcesOnly' is true all unlocked scratch resources older than 'msNotUsed' will
+ * be purged but the unlocked resources with persistent data will remain. If
+ * 'scratchResourcesOnly' is false then all unlocked resources older than 'msNotUsed' will be
+ * purged.
+ *
+ * @param msNotUsed Only unlocked resources not used in these last milliseconds
+ * will be cleaned up.
+ * @param scratchResourcesOnly If true only unlocked scratch resources will be purged.
+ */
+ void performDeferredCleanup(std::chrono::milliseconds msNotUsed,
+ bool scratchResourcesOnly=false);
+
+ // Temporary compatibility API for Android.
+ void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
+ this->performDeferredCleanup(msNotUsed);
+ }
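+
+    // Illustrative usage (a sketch; the five-second threshold is an arbitrary
+    // example value, not a Skia recommendation): call from an idle or
+    // background handler to trim stale resources.
+    //
+    //   ctx->performDeferredCleanup(std::chrono::seconds(5),
+    //                               /*scratchResourcesOnly=*/true);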
+
+ /**
+     * Purge unlocked resources from the cache until the provided byte count has been reached
+ * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
+ * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
+ * resource types.
+ *
+     * @param bytesToPurge the desired number of bytes to be purged.
+ * @param preferScratchResources If true scratch resources will be purged prior to other
+ * resource types.
+ */
+ void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);
+
+ /**
+ * This entry point is intended for instances where an app has been backgrounded or
+ * suspended.
+ * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
+ * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
+ * then all unlocked resources will be purged.
+ * In either case, after the unlocked resources are purged a separate pass will be made to
+ * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
+ * some resources with persistent data may be purged to be under budget).
+ *
+     * @param scratchResourcesOnly If true only unlocked scratch resources will be purged prior
+     *                             to enforcing the budget requirements.
+ */
+ void purgeUnlockedResources(bool scratchResourcesOnly);
+
+ /**
+ * Gets the maximum supported texture size.
+ */
+ using GrRecordingContext::maxTextureSize;
+
+ /**
+ * Gets the maximum supported render target size.
+ */
+ using GrRecordingContext::maxRenderTargetSize;
+
+ /**
+ * Can a SkImage be created with the given color type.
+ */
+ using GrRecordingContext::colorTypeSupportedAsImage;
+
+ /**
+ * Can a SkSurface be created with the given color type. To check whether MSAA is supported
+ * use maxSurfaceSampleCountForColorType().
+ */
+ using GrRecordingContext::colorTypeSupportedAsSurface;
+
+ /**
+ * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
+ * rendering is supported for the color type. 0 is returned if rendering to this color type
+ * is not supported at all.
+ */
+ using GrRecordingContext::maxSurfaceSampleCountForColorType;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Misc.
+
+ /**
+ * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
+ * executing any more commands on the GPU. If this call returns false, then the GPU back-end
+ * will not wait on any passed in semaphores, and the client will still own the semaphores,
+ * regardless of the value of deleteSemaphoresAfterWait.
+ *
+ * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
+ * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
+ * knows that Skia has finished waiting on them. This can be done by using finishedProcs on
+ * flush calls.
+ */
+ bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
+ bool deleteSemaphoresAfterWait = true);
+
+ /**
+ * Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D
+ * API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed by
+ * GrContext::submit(syncCpu).
+ */
+ void flushAndSubmit(bool syncCpu = false) {
+ this->flush(GrFlushInfo());
+ this->submit(syncCpu);
+ }
+
+ /**
+ * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
+ * objects. A call to `submit` is always required to ensure work is actually sent to
+ * the gpu. Some specific API details:
+ * GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
+ * sync objects from the flush will not be valid until a submission occurs.
+ *
+     * Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend API's corresponding command
+ * buffer or encoder objects. However, these objects are not sent to the gpu until a
+ * submission occurs.
+ *
+ * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
+ * submitted to the gpu during the next submit call (it is possible Skia failed to create a
+ * subset of the semaphores). The client should not wait on these semaphores until after submit
+ * has been called, and must keep them alive until then. If this call returns
+ * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
+ * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in with
+ * the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
+ * client is still responsible for deleting any initialized semaphores.
+     * Regardless of semaphore submission, the context will still be flushed. It should be
+ * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
+ * happen. It simply means there were no semaphores submitted to the GPU. A caller should only
+ * take this as a failure if they passed in semaphores to be submitted.
+ */
+ GrSemaphoresSubmitted flush(const GrFlushInfo& info);
+
+ void flush() { this->flush({}); }
+
+ /**
+ * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
+ * value of the submit will indicate whether or not the submission to the GPU was successful.
+ *
+ * If the call returns true, all previously passed in semaphores in flush calls will have been
+ * submitted to the GPU and they can safely be waited on. The caller should wait on those
+ * semaphores or perform some other global synchronization before deleting the semaphores.
+ *
+ * If it returns false, then those same semaphores will not have been submitted and we will not
+ * try to submit them again. The caller is free to delete the semaphores at any time.
+ *
+ * If the syncCpu flag is true this function will return once the gpu has finished with all
+ * submitted work.
+ */
+ bool submit(bool syncCpu = false);
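+
+    // Illustrative flush/submit pattern (a sketch; 'semaphore' stands for a
+    // hypothetical client-created GrBackendSemaphore, not defined here):
+    //
+    //   GrFlushInfo info;
+    //   info.fNumSemaphores = 1;
+    //   info.fSignalSemaphores = &semaphore;
+    //   GrSemaphoresSubmitted res = ctx->flush(info);
+    //   if (ctx->submit() && res == GrSemaphoresSubmitted::kYes) {
+    //       // now safe to wait on 'semaphore'
+    //   }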
+
+ /**
+ * Checks whether any asynchronous work is complete and if so calls related callbacks.
+ */
+ void checkAsyncWorkCompletion();
+
+ /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
+ // Chrome is using this!
+ void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
+
+ bool supportsDistanceFieldText() const;
+
+ void storeVkPipelineCacheData();
+
+ /**
+ * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
+ * It is guaranteed that this backend format will be the one used by the following
+ * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ using GrRecordingContext::defaultBackendFormat;
+
+ /**
+ * The explicitly allocated backend texture API allows clients to use Skia to create backend
+ * objects outside of Skia proper (i.e., Skia's caching system will not know about them.)
+ *
+ * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
+ * before deleting the context used to create them. If the backend is Vulkan, the textures must
+ * be deleted before abandoning the context as well. Additionally, clients should only delete
+ * these objects on the thread for which that context is active.
+ *
+ * The client is responsible for ensuring synchronization between different uses
+ * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
+     * surface, rewrapping it in an image and drawing the image will require explicit
+ * synchronization on the client's part).
+ */
+
+ /**
+ * If possible, create an uninitialized backend texture. The client should ensure that the
+ * returned backend texture is valid.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_UNDEFINED.
+ */
+ GrBackendTexture createBackendTexture(int width,
+ int height,
+ const GrBackendFormat&,
+ GrMipmapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo,
+ std::string_view label = {});
+
+ /**
+ * If possible, create an uninitialized backend texture. The client should ensure that the
+ * returned backend texture is valid.
+ * If successful, the created backend texture will be compatible with the provided
+ * SkColorType.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_UNDEFINED.
+ */
+ GrBackendTexture createBackendTexture(int width, int height,
+ SkColorType,
+ GrMipmapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo,
+ std::string_view label = {});
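+
+    // Illustrative lifecycle (a sketch; error handling elided):
+    //
+    //   GrBackendTexture tex = ctx->createBackendTexture(
+    //           256, 256, kRGBA_8888_SkColorType,
+    //           GrMipmapped::kNo, GrRenderable::kYes);
+    //   if (tex.isValid()) {
+    //       // ... wrap in an SkSurface or SkImage, draw, flush, submit ...
+    //       ctx->deleteBackendTexture(tex);
+    //   }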
+
+ /**
+ * If possible, create a backend texture initialized to a particular color. The client should
+ * ensure that the returned backend texture is valid. The client can pass in a finishedProc
+ * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
+ * client is required to call `submit` to send the upload work to the gpu. The
+ * finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ GrBackendTexture createBackendTexture(int width, int height,
+ const GrBackendFormat&,
+ const SkColor4f& color,
+ GrMipmapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ /**
+ * If possible, create a backend texture initialized to a particular color. The client should
+ * ensure that the returned backend texture is valid. The client can pass in a finishedProc
+ * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
+ * client is required to call `submit` to send the upload work to the gpu. The
+ * finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * If successful, the created backend texture will be compatible with the provided
+ * SkColorType.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ GrBackendTexture createBackendTexture(int width, int height,
+ SkColorType,
+ const SkColor4f& color,
+ GrMipmapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ /**
+ * If possible, create a backend texture initialized with the provided pixmap data. The client
+ * should ensure that the returned backend texture is valid. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * If successful, the created backend texture will be compatible with the provided
+ * pixmap(s). Compatible, in this case, means that the backend format will be the result
+ * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted
+ * when this call returns.
+ * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
+ * the data for all the mipmap levels must be provided. In the mipmapped case all the
+ * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
+ * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The
+ * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture.
+ * Note: the pixmap's alphatypes and colorspaces are ignored.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ GrBackendTexture createBackendTexture(const SkPixmap srcData[],
+ int numLevels,
+ GrSurfaceOrigin,
+ GrRenderable,
+ GrProtected,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ /**
+     * Convenience version of createBackendTexture() that takes just a base level pixmap.
+ */
+ GrBackendTexture createBackendTexture(const SkPixmap& srcData,
+ GrSurfaceOrigin textureOrigin,
+ GrRenderable renderable,
+ GrProtected isProtected,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ // Deprecated versions that do not take origin and assume top-left.
+ GrBackendTexture createBackendTexture(const SkPixmap srcData[],
+ int numLevels,
+ GrRenderable renderable,
+ GrProtected isProtected,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ GrBackendTexture createBackendTexture(const SkPixmap& srcData,
+ GrRenderable renderable,
+ GrProtected isProtected,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ /**
+ * If possible, updates a backend texture to be filled to a particular color. The client should
+ * check the return value to see if the update was successful. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to update the GrBackendTexture.
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateBackendTexture(const GrBackendTexture&,
+ const SkColor4f& color,
+ GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext);
+
+ /**
+ * If possible, updates a backend texture to be filled to a particular color. The data in
+ * GrBackendTexture and passed in color is interpreted with respect to the passed in
+ * SkColorType. The client should check the return value to see if the update was successful.
+ * The client can pass in a finishedProc to be notified when the data has been uploaded by the
+ * gpu and the texture can be deleted. The client is required to call `submit` to send
+ * the upload work to the gpu. The finishedProc will always get called even if we failed to
+ * update the GrBackendTexture.
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateBackendTexture(const GrBackendTexture&,
+ SkColorType skColorType,
+ const SkColor4f& color,
+ GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext);
+
+ /**
+ * If possible, updates a backend texture filled with the provided pixmap data. The client
+ * should check the return value to see if the update was successful. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case,
+ * means that the backend format is compatible with the base pixmap's colortype. The src data
+ * can be deleted when this call returns.
+ * If the backend texture is mip mapped, the data for all the mipmap levels must be provided.
+ * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
+ * Additionally, all the miplevels must be sized correctly (please see
+ * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the
+ * pixmap data is vertically flipped in the texture.
+ * Note: the pixmap's alphatypes and colorspaces are ignored.
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateBackendTexture(const GrBackendTexture&,
+ const SkPixmap srcData[],
+ int numLevels,
+ GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ /**
+ * Convenience version of updateBackendTexture that takes just a base level pixmap.
+ */
+ bool updateBackendTexture(const GrBackendTexture& texture,
+ const SkPixmap& srcData,
+ GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr) {
+ return this->updateBackendTexture(texture,
+ &srcData,
+ 1,
+ textureOrigin,
+ finishedProc,
+ finishedContext);
+ }
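+
+    // Illustrative update (a sketch; 'pixmap' is a hypothetical SkPixmap whose
+    // color type is compatible with the texture's backend format):
+    //
+    //   if (ctx->updateBackendTexture(tex, pixmap)) {
+    //       ctx->submit();  // required to actually send the upload to the GPU
+    //   }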
+
+ // Deprecated version that does not take origin and assumes top-left.
+ bool updateBackendTexture(const GrBackendTexture& texture,
+ const SkPixmap srcData[],
+ int numLevels,
+ GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext);
+
+ /**
+ * Retrieve the GrBackendFormat for a given SkTextureCompressionType. This is
+ * guaranteed to match the backend format used by the following
+ * createCompressedBackendTexture methods that take a CompressionType.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ using GrRecordingContext::compressedBackendFormat;
+
+ /**
+     * If possible, create a compressed backend texture initialized to a particular color. The
+ * client should ensure that the returned backend texture is valid. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ GrBackendTexture createCompressedBackendTexture(int width, int height,
+ const GrBackendFormat&,
+ const SkColor4f& color,
+ GrMipmapped,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ GrBackendTexture createCompressedBackendTexture(int width, int height,
+ SkTextureCompressionType,
+ const SkColor4f& color,
+ GrMipmapped,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ /**
+ * If possible, create a backend texture initialized with the provided raw data. The client
+ * should ensure that the returned backend texture is valid. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
+ * the data for all the mipmap levels must be provided. Additionally, all the miplevels
+ * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ GrBackendTexture createCompressedBackendTexture(int width, int height,
+ const GrBackendFormat&,
+ const void* data, size_t dataSize,
+ GrMipmapped,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ GrBackendTexture createCompressedBackendTexture(int width, int height,
+ SkTextureCompressionType,
+ const void* data, size_t dataSize,
+ GrMipmapped,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ /**
+ * If possible, updates a backend texture filled with the provided color. If the texture is
+ * mipmapped, all levels of the mip chain will be updated to have the supplied color. The client
+ * should check the return value to see if the update was successful. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateCompressedBackendTexture(const GrBackendTexture&,
+ const SkColor4f& color,
+ GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext);
+
+ /**
+ * If possible, updates a backend texture filled with the provided raw data. The client
+ * should check the return value to see if the update was successful. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * If a mipmapped texture is passed in, the data for all the mipmap levels must be provided.
+ * Additionally, all the miplevels must be sized correctly (please see
+     * SkMipmap::ComputeLevelSize and ComputeLevelCount).
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateCompressedBackendTexture(const GrBackendTexture&,
+ const void* data,
+ size_t dataSize,
+ GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext);
+
+ /**
+ * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
+ * skgpu::MutableTextureState. All objects that wrap the backend surface (i.e. SkSurfaces and
+ * SkImages) will also be aware of this state change. This call does not submit the state change
+ * to the gpu, but requires the client to call `submit` to send it to the GPU. The work
+ * for this call is ordered linearly with all other calls that require GrContext::submit to be
+     * called (e.g. updateBackendTexture and flush). If finishedProc is not null then it will be
+ * called with finishedContext after the state transition is known to have occurred on the GPU.
+ *
+ * See skgpu::MutableTextureState to see what state can be set via this call.
+ *
+ * If the backend API is Vulkan, the caller can set the skgpu::MutableTextureState's
+ * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
+ * tell Skia to not change those respective states.
+ *
+ * If previousState is not null and this returns true, then Skia will have filled in
+ * previousState to have the values of the state before this call.
+ */
+ bool setBackendTextureState(const GrBackendTexture&,
+ const skgpu::MutableTextureState&,
+ skgpu::MutableTextureState* previousState = nullptr,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+ bool setBackendRenderTargetState(const GrBackendRenderTarget&,
+ const skgpu::MutableTextureState&,
+ skgpu::MutableTextureState* previousState = nullptr,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ void deleteBackendTexture(GrBackendTexture);
+
+ // This interface allows clients to pre-compile shaders and populate the runtime program cache.
+ // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format.
+ //
+ // Steps to use this API:
+ //
+ // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
+ // something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This
+ // will ensure that the blobs are SkSL, and are suitable for pre-compilation.
+ // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
+ //
+ // 3) Switch over to shipping your application. Include the key/data pairs from above.
+ // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
+ // This will compile the SkSL to create a GL program, and populate the runtime cache.
+ //
+ // This is only guaranteed to work if the context/device used in step #2 are created in the
+ // same way as the one used in step #4, and the same GrContextOptions are specified.
+    // Using cached shader blobs on a different device or driver is undefined.
+ bool precompileShader(const SkData& key, const SkData& data);
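+
+    // Illustrative startup loop (a sketch; 'savedBlobs' is a hypothetical
+    // container of the key/data pairs captured via the PersistentCache above):
+    //
+    //   for (const auto& [key, data] : savedBlobs) {
+    //       ctx->precompileShader(*key, *data);  // warms the runtime cache
+    //   }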
+
+#ifdef SK_ENABLE_DUMP_GPU
+ /** Returns a string with detailed information about the context & GPU, in JSON format. */
+ SkString dump() const;
+#endif
+
+ class DirectContextID {
+ public:
+ static GrDirectContext::DirectContextID Next();
+
+ DirectContextID() : fID(SK_InvalidUniqueID) {}
+
+ bool operator==(const DirectContextID& that) const { return fID == that.fID; }
+ bool operator!=(const DirectContextID& that) const { return !(*this == that); }
+
+ void makeInvalid() { fID = SK_InvalidUniqueID; }
+ bool isValid() const { return fID != SK_InvalidUniqueID; }
+
+ private:
+ constexpr DirectContextID(uint32_t id) : fID(id) {}
+ uint32_t fID;
+ };
+
+ DirectContextID directContextID() const { return fDirectContextID; }
+
+ // Provides access to functions that aren't part of the public API.
+ GrDirectContextPriv priv();
+ const GrDirectContextPriv priv() const; // NOLINT(readability-const-return-type)
+
+protected:
+ GrDirectContext(GrBackendApi backend, const GrContextOptions& options);
+
+ bool init() override;
+
+ GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+ skgpu::ganesh::SmallPathAtlasMgr* onGetSmallPathAtlasMgr();
+#endif
+
+ GrDirectContext* asDirectContext() override { return this; }
+
+private:
+    // This call will make sure our work on the GPU is finished and will execute any outstanding
+ // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
+ // outstanding work on the gpu. The main use currently for this function is when tearing down or
+ // abandoning the context.
+ //
+ // When we finish up work on the GPU it could trigger callbacks to the client. In the case we
+ // are abandoning the context we don't want the client to be able to use the GrDirectContext to
+ // issue more commands during the callback. Thus before calling this function we set the
+    // GrDirectContext's state to be abandoned. However, we need to be able to get past the abandoned
+ // check in the call to know that it is safe to execute this. The shouldExecuteWhileAbandoned
+ // bool is used for this signal.
+ void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);
+
+ // This delete callback needs to be the first thing on the GrDirectContext so that it is the
+ // last thing destroyed. The callback may signal the client to clean up things that may need
+    // to survive the lifetime of some of the other objects on the GrDirectContext. So make sure
+ // we don't call it until all else has been destroyed.
+ class DeleteCallbackHelper {
+ public:
+ DeleteCallbackHelper(GrDirectContextDestroyedContext context,
+ GrDirectContextDestroyedProc proc)
+ : fContext(context), fProc(proc) {}
+
+ ~DeleteCallbackHelper() {
+ if (fProc) {
+ fProc(fContext);
+ }
+ }
+
+ private:
+ GrDirectContextDestroyedContext fContext;
+ GrDirectContextDestroyedProc fProc;
+ };
+ std::unique_ptr<DeleteCallbackHelper> fDeleteCallbackHelper;
+
+ const DirectContextID fDirectContextID;
+ // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
+ // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
+ // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
+ // invoked after objects they depend upon have already been destroyed.
+ std::unique_ptr<SkTaskGroup> fTaskGroup;
+ std::unique_ptr<sktext::gpu::StrikeCache> fStrikeCache;
+ sk_sp<GrGpu> fGpu;
+ std::unique_ptr<GrResourceCache> fResourceCache;
+ std::unique_ptr<GrResourceProvider> fResourceProvider;
+
+ // This is incremented before we start calling ReleaseProcs from GrSurfaces and decremented
+    // after. A ReleaseProc may trigger code causing another resource to get freed, so we track
+    // the count to know if we are in a ReleaseProc at any level. When this is set to a value greater
+ // than zero we will not allow abandonContext calls to be made on the context.
+ int fInsideReleaseProcCnt = 0;
+
+ bool fDidTestPMConversions;
+ // true if the PM/UPM conversion succeeded; false otherwise
+ bool fPMUPMConversionsRoundTrip;
+
+ GrContextOptions::PersistentCache* fPersistentCache;
+
+ std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
+ std::unique_ptr<GrAtlasManager> fAtlasManager;
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+ std::unique_ptr<skgpu::ganesh::SmallPathAtlasMgr> fSmallPathAtlasMgr;
+#endif
+
+ friend class GrDirectContextPriv;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h b/gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h
new file mode 100644
index 0000000000..1aa995c791
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDriverBugWorkarounds_DEFINED
+#define GrDriverBugWorkarounds_DEFINED
+
+// External embedders of Skia can override this to use their own list
+// of workaround names.
+#ifdef SK_GPU_WORKAROUNDS_HEADER
+#include SK_GPU_WORKAROUNDS_HEADER
+#else
+// To regenerate this file, set gn arg "skia_generate_workarounds = true"
+// or invoke `bazel run //tools:generate_workarounds`
+// This is not rebuilt by default to avoid embedders having to have extra
+// build steps.
+#include "include/gpu/GrDriverBugWorkaroundsAutogen.h"
+#endif
+
+#include "include/core/SkTypes.h"
+
+#include <stdint.h>
+#include <vector>
+
+enum GrDriverBugWorkaroundType {
+#define GPU_OP(type, name) type,
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+ NUMBER_OF_GPU_DRIVER_BUG_WORKAROUND_TYPES
+};
+
+class SK_API GrDriverBugWorkarounds {
+ public:
+ GrDriverBugWorkarounds();
+ GrDriverBugWorkarounds(const GrDriverBugWorkarounds&) = default;
+ explicit GrDriverBugWorkarounds(const std::vector<int32_t>& workarounds);
+
+ GrDriverBugWorkarounds& operator=(const GrDriverBugWorkarounds&) = default;
+
+ // Turn on any workarounds listed in |workarounds| (but don't turn any off).
+ void applyOverrides(const GrDriverBugWorkarounds& workarounds);
+
+ ~GrDriverBugWorkarounds();
+
+#define GPU_OP(type, name) bool name = false;
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+};
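+
+// Illustrative usage (a sketch): each GPU_OP entry expands to a public bool
+// field, so a workaround can be enabled and queried directly.
+//
+//   GrDriverBugWorkarounds w;
+//   w.disable_texture_storage = true;
+//   if (w.disable_texture_storage) { /* avoid texture-storage code paths */ }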
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h b/gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h
new file mode 100644
index 0000000000..d0b96ca80a
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h
@@ -0,0 +1,43 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from build_workaround_header.py
+// DO NOT EDIT!
+
+#define GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP) \
+ GPU_OP(ADD_AND_TRUE_TO_LOOP_CONDITION, \
+ add_and_true_to_loop_condition) \
+ GPU_OP(DISABLE_BLEND_EQUATION_ADVANCED, \
+ disable_blend_equation_advanced) \
+ GPU_OP(DISABLE_DISCARD_FRAMEBUFFER, \
+ disable_discard_framebuffer) \
+ GPU_OP(DISABLE_DUAL_SOURCE_BLENDING_SUPPORT, \
+ disable_dual_source_blending_support) \
+ GPU_OP(DISABLE_TEXTURE_STORAGE, \
+ disable_texture_storage) \
+ GPU_OP(DISALLOW_LARGE_INSTANCED_DRAW, \
+ disallow_large_instanced_draw) \
+ GPU_OP(EMULATE_ABS_INT_FUNCTION, \
+ emulate_abs_int_function) \
+ GPU_OP(FLUSH_ON_FRAMEBUFFER_CHANGE, \
+ flush_on_framebuffer_change) \
+ GPU_OP(FORCE_UPDATE_SCISSOR_STATE_WHEN_BINDING_FBO0, \
+ force_update_scissor_state_when_binding_fbo0) \
+ GPU_OP(GL_CLEAR_BROKEN, \
+ gl_clear_broken) \
+ GPU_OP(MAX_FRAGMENT_UNIFORM_VECTORS_32, \
+ max_fragment_uniform_vectors_32) \
+ GPU_OP(MAX_MSAA_SAMPLE_COUNT_4, \
+ max_msaa_sample_count_4) \
+ GPU_OP(PACK_PARAMETERS_WORKAROUND_WITH_PACK_BUFFER, \
+ pack_parameters_workaround_with_pack_buffer) \
+ GPU_OP(REMOVE_POW_WITH_CONSTANT_EXPONENT, \
+ remove_pow_with_constant_exponent) \
+ GPU_OP(REWRITE_DO_WHILE_LOOPS, \
+ rewrite_do_while_loops) \
+ GPU_OP(UNBIND_ATTACHMENTS_ON_BOUND_RENDER_FBO_DELETE, \
+ unbind_attachments_on_bound_render_fbo_delete) \
+ GPU_OP(UNFOLD_SHORT_CIRCUIT_AS_TERNARY_OPERATION, \
+ unfold_short_circuit_as_ternary_operation) \
+// The End
diff --git a/gfx/skia/skia/include/gpu/GrRecordingContext.h b/gfx/skia/skia/include/gpu/GrRecordingContext.h
new file mode 100644
index 0000000000..b7bd6af920
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrRecordingContext.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRecordingContext_DEFINED
+#define GrRecordingContext_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/gpu/ganesh/GrImageContext.h"
+
+#if GR_GPU_STATS && GR_TEST_UTILS
+#include <map>
+#include <string>
+#endif
+
+class GrAuditTrail;
+class GrBackendFormat;
+class GrDrawingManager;
+class GrOnFlushCallbackObject;
+class GrMemoryPool;
+class GrProgramDesc;
+class GrProgramInfo;
+class GrProxyProvider;
+class GrRecordingContextPriv;
+class GrSurfaceProxy;
+class GrThreadSafeCache;
+class SkArenaAlloc;
+class SkCapabilities;
+class SkJSONWriter;
+
+namespace sktext::gpu {
+class SubRunAllocator;
+class TextBlobRedrawCoordinator;
+}
+
+#if GR_TEST_UTILS
+class SkString;
+#endif
+
+class GrRecordingContext : public GrImageContext {
+public:
+ ~GrRecordingContext() override;
+
+ SK_API GrBackendFormat defaultBackendFormat(SkColorType ct, GrRenderable renderable) const {
+ return INHERITED::defaultBackendFormat(ct, renderable);
+ }
+
+ /**
+ * Reports whether the GrDirectContext associated with this GrRecordingContext is abandoned.
+ * When called on a GrDirectContext it may actively check whether the underlying 3D API
+ * device/context has been disconnected before reporting the status. If so, calling this
+ * method will transition the GrDirectContext to the abandoned state.
+ */
+ bool abandoned() override { return INHERITED::abandoned(); }
+
+    /**
+ * Can a SkSurface be created with the given color type. To check whether MSAA is supported
+ * use maxSurfaceSampleCountForColorType().
+ */
+ SK_API bool colorTypeSupportedAsSurface(SkColorType colorType) const {
+ if (kR16G16_unorm_SkColorType == colorType ||
+ kA16_unorm_SkColorType == colorType ||
+ kA16_float_SkColorType == colorType ||
+ kR16G16_float_SkColorType == colorType ||
+ kR16G16B16A16_unorm_SkColorType == colorType ||
+ kGray_8_SkColorType == colorType) {
+ return false;
+ }
+
+ return this->maxSurfaceSampleCountForColorType(colorType) > 0;
+ }
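+
+    // Illustrative check (a sketch; 'rContext' is a hypothetical
+    // GrRecordingContext*): gate MSAA surface creation on support.
+    //
+    //   if (rContext->colorTypeSupportedAsSurface(kRGBA_8888_SkColorType) &&
+    //       rContext->maxSurfaceSampleCountForColorType(kRGBA_8888_SkColorType) >= 4) {
+    //       // a 4-sample surface of this color type can be requested
+    //   }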
+
+ /**
+ * Gets the maximum supported texture size.
+ */
+ SK_API int maxTextureSize() const;
+
+ /**
+ * Gets the maximum supported render target size.
+ */
+ SK_API int maxRenderTargetSize() const;
+
+ /**
+ * Can a SkImage be created with the given color type.
+ */
+ SK_API bool colorTypeSupportedAsImage(SkColorType) const;
+
+ /**
+ * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
+ * rendering is supported for the color type. 0 is returned if rendering to this color type
+ * is not supported at all.
+ */
+ SK_API int maxSurfaceSampleCountForColorType(SkColorType colorType) const {
+ return INHERITED::maxSurfaceSampleCountForColorType(colorType);
+ }
+
+ SK_API sk_sp<const SkCapabilities> skCapabilities() const;
+
+ // Provides access to functions that aren't part of the public API.
+ GrRecordingContextPriv priv();
+ const GrRecordingContextPriv priv() const; // NOLINT(readability-const-return-type)
+
+ // The collection of specialized memory arenas for different types of data recorded by a
+ // GrRecordingContext. Arenas does not maintain ownership of the pools it groups together.
+ class Arenas {
+ public:
+ Arenas(SkArenaAlloc*, sktext::gpu::SubRunAllocator*);
+
+ // For storing pipelines and other complex data as-needed by ops
+ SkArenaAlloc* recordTimeAllocator() { return fRecordTimeAllocator; }
+
+ // For storing GrTextBlob SubRuns
+ sktext::gpu::SubRunAllocator* recordTimeSubRunAllocator() {
+ return fRecordTimeSubRunAllocator;
+ }
+
+ private:
+ SkArenaAlloc* fRecordTimeAllocator;
+ sktext::gpu::SubRunAllocator* fRecordTimeSubRunAllocator;
+ };
+
+protected:
+ friend class GrRecordingContextPriv; // for hidden functions
+ friend class SkDeferredDisplayList; // for OwnedArenas
+ friend class SkDeferredDisplayListPriv; // for ProgramData
+
+ // Like Arenas, but preserves ownership of the underlying pools.
+ class OwnedArenas {
+ public:
+ OwnedArenas(bool ddlRecording);
+ ~OwnedArenas();
+
+ Arenas get();
+
+ OwnedArenas& operator=(OwnedArenas&&);
+
+ private:
+ bool fDDLRecording;
+ std::unique_ptr<SkArenaAlloc> fRecordTimeAllocator;
+ std::unique_ptr<sktext::gpu::SubRunAllocator> fRecordTimeSubRunAllocator;
+ };
+
+ GrRecordingContext(sk_sp<GrContextThreadSafeProxy>, bool ddlRecording);
+
+ bool init() override;
+
+ void abandonContext() override;
+
+ GrDrawingManager* drawingManager();
+
+ // There is no going back from this method. It should only be called to control the timing
+ // during abandon or destruction of the context.
+ void destroyDrawingManager();
+
+ Arenas arenas() { return fArenas.get(); }
+ // This entry point should only be used for DDL creation where we want the ops' lifetime to
+ // match that of the DDL.
+ OwnedArenas&& detachArenas();
+
+ GrProxyProvider* proxyProvider() { return fProxyProvider.get(); }
+ const GrProxyProvider* proxyProvider() const { return fProxyProvider.get(); }
+
+ struct ProgramData {
+ ProgramData(std::unique_ptr<const GrProgramDesc>, const GrProgramInfo*);
+ ProgramData(ProgramData&&); // for SkTArray
+ ProgramData(const ProgramData&) = delete;
+ ~ProgramData();
+
+ const GrProgramDesc& desc() const { return *fDesc; }
+ const GrProgramInfo& info() const { return *fInfo; }
+
+ private:
+ // TODO: store the GrProgramDescs in the 'fRecordTimeData' arena
+ std::unique_ptr<const GrProgramDesc> fDesc;
+ // The program infos should be stored in 'fRecordTimeData' so do not need to be ref
+ // counted or deleted in the destructor.
+ const GrProgramInfo* fInfo = nullptr;
+ };
+
+ // This entry point gives the recording context a chance to cache the provided
+ // programInfo. The DDL context takes this opportunity to store programInfos as a sidecar
+ // to the DDL.
+ virtual void recordProgramInfo(const GrProgramInfo*) {}
+ // This asks the recording context to return any programInfos it may have collected
+ // via the 'recordProgramInfo' call. It is up to the caller to ensure that the lifetime
+ // of the programInfos matches the intended use. For example, in DDL-record mode it
+ // is known that all the programInfos will have been allocated in an arena with the
+    // same lifetime as the DDL itself.
+ virtual void detachProgramData(SkTArray<ProgramData>*) {}
+
+ sktext::gpu::TextBlobRedrawCoordinator* getTextBlobRedrawCoordinator();
+ const sktext::gpu::TextBlobRedrawCoordinator* getTextBlobRedrawCoordinator() const;
+
+ GrThreadSafeCache* threadSafeCache();
+ const GrThreadSafeCache* threadSafeCache() const;
+
+ /**
+ * Registers an object for flush-related callbacks. (See GrOnFlushCallbackObject.)
+ *
+ * NOTE: the drawing manager tracks this object as a raw pointer; it is up to the caller to
+ * ensure its lifetime is tied to that of the context.
+ */
+ void addOnFlushCallbackObject(GrOnFlushCallbackObject*);
+
+ GrRecordingContext* asRecordingContext() override { return this; }
+
+ class Stats {
+ public:
+ Stats() = default;
+
+#if GR_GPU_STATS
+ void reset() { *this = {}; }
+
+ int numPathMasksGenerated() const { return fNumPathMasksGenerated; }
+ void incNumPathMasksGenerated() { fNumPathMasksGenerated++; }
+
+ int numPathMaskCacheHits() const { return fNumPathMaskCacheHits; }
+ void incNumPathMasksCacheHits() { fNumPathMaskCacheHits++; }
+
+#if GR_TEST_UTILS
+ void dump(SkString* out) const;
+ void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
+#endif
+
+ private:
+ int fNumPathMasksGenerated{0};
+ int fNumPathMaskCacheHits{0};
+
+#else // GR_GPU_STATS
+ void incNumPathMasksGenerated() {}
+ void incNumPathMasksCacheHits() {}
+
+#if GR_TEST_UTILS
+ void dump(SkString*) const {}
+ void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const {}
+#endif
+#endif // GR_GPU_STATS
+ } fStats;
+
+#if GR_GPU_STATS && GR_TEST_UTILS
+ struct DMSAAStats {
+ void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
+ void dump() const;
+ void merge(const DMSAAStats&);
+ int fNumRenderPasses = 0;
+ int fNumMultisampleRenderPasses = 0;
+ std::map<std::string, int> fTriggerCounts;
+ };
+
+ DMSAAStats fDMSAAStats;
+#endif
+
+ Stats* stats() { return &fStats; }
+ const Stats* stats() const { return &fStats; }
+ void dumpJSON(SkJSONWriter*) const;
+
+protected:
+ // Delete last in case other objects call it during destruction.
+ std::unique_ptr<GrAuditTrail> fAuditTrail;
+
+private:
+ OwnedArenas fArenas;
+
+ std::unique_ptr<GrDrawingManager> fDrawingManager;
+ std::unique_ptr<GrProxyProvider> fProxyProvider;
+
+#if GR_TEST_UTILS
+ int fSuppressWarningMessages = 0;
+#endif
+
+ using INHERITED = GrImageContext;
+};
+
+/**
+ * Safely cast a possibly-null base context to direct context.
+ */
+static inline GrDirectContext* GrAsDirectContext(GrContext_Base* base) {
+ return base ? base->asDirectContext() : nullptr;
+}
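+
+// Illustrative usage (a sketch; 'canvas' is a hypothetical SkCanvas* backed by
+// some GrRecordingContext):
+//
+//   if (GrDirectContext* dContext = GrAsDirectContext(canvas->recordingContext())) {
+//       dContext->flushAndSubmit();  // only a direct context can reach the GPU
+//   }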
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrSurfaceInfo.h b/gfx/skia/skia/include/gpu/GrSurfaceInfo.h
new file mode 100644
index 0000000000..e037fb4957
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrSurfaceInfo.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSurfaceInfo_DEFINED
+#define GrSurfaceInfo_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+#ifdef SK_GL
+#include "include/private/gpu/ganesh/GrGLTypesPriv.h"
+#endif
+#ifdef SK_VULKAN
+#include "include/private/gpu/ganesh/GrVkTypesPriv.h"
+#endif
+#ifdef SK_DIRECT3D
+#include "include/private/gpu/ganesh/GrD3DTypesMinimal.h"
+struct GrD3DSurfaceInfo;
+#endif
+#ifdef SK_METAL
+#include "include/private/gpu/ganesh/GrMtlTypesPriv.h"
+#endif
+#ifdef SK_DAWN
+#include "include/private/gpu/ganesh/GrDawnTypesPriv.h"
+#endif
+#include "include/private/gpu/ganesh/GrMockTypesPriv.h"
+
+class GrSurfaceInfo {
+public:
+ GrSurfaceInfo() {}
+#ifdef SK_GL
+ GrSurfaceInfo(const GrGLSurfaceInfo& glInfo)
+ : fBackend(GrBackendApi::kOpenGL)
+ , fValid(true)
+ , fSampleCount(glInfo.fSampleCount)
+ , fLevelCount(glInfo.fLevelCount)
+ , fProtected(glInfo.fProtected)
+ , fGLSpec(glInfo) {}
+#endif
+#ifdef SK_VULKAN
+ GrSurfaceInfo(const GrVkSurfaceInfo& vkInfo)
+ : fBackend(GrBackendApi::kVulkan)
+ , fValid(true)
+ , fSampleCount(vkInfo.fSampleCount)
+ , fLevelCount(vkInfo.fLevelCount)
+ , fProtected(vkInfo.fProtected)
+ , fVkSpec(vkInfo) {}
+#endif
+#ifdef SK_DIRECT3D
+ GrSurfaceInfo(const GrD3DSurfaceInfo& d3dInfo);
+#endif
+#ifdef SK_METAL
+ GrSurfaceInfo(const GrMtlSurfaceInfo& mtlInfo)
+ : fBackend(GrBackendApi::kMetal)
+ , fValid(true)
+ , fSampleCount(mtlInfo.fSampleCount)
+ , fLevelCount(mtlInfo.fLevelCount)
+ , fProtected(mtlInfo.fProtected)
+ , fMtlSpec(mtlInfo) {}
+#endif
+#ifdef SK_DAWN
+ GrSurfaceInfo(const GrDawnSurfaceInfo& dawnInfo)
+ : fBackend(GrBackendApi::kDawn)
+ , fValid(true)
+ , fSampleCount(dawnInfo.fSampleCount)
+ , fLevelCount(dawnInfo.fLevelCount)
+ , fProtected(dawnInfo.fProtected)
+ , fDawnSpec(dawnInfo) {}
+#endif
+ GrSurfaceInfo(const GrMockSurfaceInfo& mockInfo)
+ : fBackend(GrBackendApi::kMock)
+ , fValid(true)
+ , fSampleCount(mockInfo.fSampleCount)
+ , fLevelCount(mockInfo.fLevelCount)
+ , fProtected(mockInfo.fProtected)
+ , fMockSpec(mockInfo) {}
+
+ ~GrSurfaceInfo();
+ GrSurfaceInfo(const GrSurfaceInfo&) = default;
+
+ bool isValid() const { return fValid; }
+ GrBackendApi backend() const { return fBackend; }
+
+ uint32_t numSamples() const { return fSampleCount; }
+ uint32_t numMipLevels() const { return fLevelCount; }
+ GrProtected isProtected() const { return fProtected; }
+
+#ifdef SK_GL
+ bool getGLSurfaceInfo(GrGLSurfaceInfo* info) const {
+ if (!this->isValid() || fBackend != GrBackendApi::kOpenGL) {
+ return false;
+ }
+ *info = GrGLTextureSpecToSurfaceInfo(fGLSpec, fSampleCount, fLevelCount, fProtected);
+ return true;
+ }
+#endif
+#ifdef SK_VULKAN
+ bool getVkSurfaceInfo(GrVkSurfaceInfo* info) const {
+ if (!this->isValid() || fBackend != GrBackendApi::kVulkan) {
+ return false;
+ }
+ *info = GrVkImageSpecToSurfaceInfo(fVkSpec, fSampleCount, fLevelCount, fProtected);
+ return true;
+ }
+#endif
+#ifdef SK_DIRECT3D
+ bool getD3DSurfaceInfo(GrD3DSurfaceInfo*) const;
+#endif
+#ifdef SK_METAL
+ bool getMtlSurfaceInfo(GrMtlSurfaceInfo* info) const {
+ if (!this->isValid() || fBackend != GrBackendApi::kMetal) {
+ return false;
+ }
+ *info = GrMtlTextureSpecToSurfaceInfo(fMtlSpec, fSampleCount, fLevelCount, fProtected);
+ return true;
+ }
+#endif
+#ifdef SK_DAWN
+ bool getDawnSurfaceInfo(GrDawnSurfaceInfo* info) const {
+ if (!this->isValid() || fBackend != GrBackendApi::kDawn) {
+ return false;
+ }
+ *info = GrDawnTextureSpecToSurfaceInfo(fDawnSpec, fSampleCount, fLevelCount, fProtected);
+ return true;
+ }
+#endif
+ bool getMockSurfaceInfo(GrMockSurfaceInfo* info) const {
+ if (!this->isValid() || fBackend != GrBackendApi::kMock) {
+ return false;
+ }
+ *info = GrMockTextureSpecToSurfaceInfo(fMockSpec, fSampleCount, fLevelCount, fProtected);
+ return true;
+ }
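+
+    // Illustrative query (a sketch): callers check the backend before reading a
+    // backend-specific spec; the getters return false on a backend mismatch.
+    //
+    //   GrMockSurfaceInfo mockInfo;
+    //   if (surfaceInfo.getMockSurfaceInfo(&mockInfo)) {
+    //       // only reached for a valid, kMock-backed GrSurfaceInfo
+    //   }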
+
+private:
+ GrBackendApi fBackend = GrBackendApi::kMock;
+ bool fValid = false;
+
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ GrProtected fProtected = GrProtected::kNo;
+
+ union {
+#ifdef SK_GL
+ GrGLTextureSpec fGLSpec;
+#endif
+#ifdef SK_VULKAN
+ GrVkImageSpec fVkSpec;
+#endif
+#ifdef SK_DIRECT3D
+ GrD3DTextureResourceSpecHolder fD3DSpec;
+#endif
+#ifdef SK_METAL
+ GrMtlTextureSpec fMtlSpec;
+#endif
+#ifdef SK_DAWN
+ GrDawnTextureSpec fDawnSpec;
+#endif
+ GrMockTextureSpec fMockSpec;
+ };
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrTypes.h b/gfx/skia/skia/include/gpu/GrTypes.h
new file mode 100644
index 0000000000..177a35a943
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrTypes.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTypes_DEFINED
+#define GrTypes_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTo.h" // IWYU pragma: keep
+
+#include <cstddef>
+#include <cstdint>
+class GrBackendSemaphore;
+
+namespace skgpu {
+enum class Mipmapped : bool;
+enum class Protected : bool;
+enum class Renderable : bool;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Wraps a C++11 enum that we use as a bitfield, and enables a limited amount of
+ * masking with type safety. Instantiated with the ~ operator.
+ */
+template<typename TFlags> class GrTFlagsMask {
+public:
+ constexpr explicit GrTFlagsMask(TFlags value) : GrTFlagsMask(static_cast<int>(value)) {}
+ constexpr explicit GrTFlagsMask(int value) : fValue(value) {}
+ constexpr int value() const { return fValue; }
+private:
+ const int fValue;
+};
+
+/**
+ * Defines bitwise operators that make it possible to use an enum class as a
+ * basic bitfield.
+ */
+#define GR_MAKE_BITFIELD_CLASS_OPS(X) \
+ [[maybe_unused]] constexpr GrTFlagsMask<X> operator~(X a) { \
+ return GrTFlagsMask<X>(~static_cast<int>(a)); \
+ } \
+ [[maybe_unused]] constexpr X operator|(X a, X b) { \
+ return static_cast<X>(static_cast<int>(a) | static_cast<int>(b)); \
+ } \
+ [[maybe_unused]] inline X& operator|=(X& a, X b) { \
+ return (a = a | b); \
+ } \
+ [[maybe_unused]] constexpr bool operator&(X a, X b) { \
+ return SkToBool(static_cast<int>(a) & static_cast<int>(b)); \
+ } \
+ [[maybe_unused]] constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X> a, GrTFlagsMask<X> b) { \
+ return GrTFlagsMask<X>(a.value() | b.value()); \
+ } \
+ [[maybe_unused]] constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X> a, X b) { \
+ return GrTFlagsMask<X>(a.value() | static_cast<int>(b)); \
+ } \
+ [[maybe_unused]] constexpr GrTFlagsMask<X> operator|(X a, GrTFlagsMask<X> b) { \
+ return GrTFlagsMask<X>(static_cast<int>(a) | b.value()); \
+ } \
+ [[maybe_unused]] constexpr X operator&(GrTFlagsMask<X> a, GrTFlagsMask<X> b) { \
+ return static_cast<X>(a.value() & b.value()); \
+ } \
+ [[maybe_unused]] constexpr X operator&(GrTFlagsMask<X> a, X b) { \
+ return static_cast<X>(a.value() & static_cast<int>(b)); \
+ } \
+ [[maybe_unused]] constexpr X operator&(X a, GrTFlagsMask<X> b) { \
+ return static_cast<X>(static_cast<int>(a) & b.value()); \
+ } \
+ [[maybe_unused]] inline X& operator&=(X& a, GrTFlagsMask<X> b) { \
+ return (a = a & b); \
+ } \
+
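+// Illustrative usage (a sketch; 'MyFlags' is a hypothetical enum, not part of
+// this header):
+//
+//   enum class MyFlags { kA = 1 << 0, kB = 1 << 1 };
+//   GR_MAKE_BITFIELD_CLASS_OPS(MyFlags)
+//
+//   MyFlags f = MyFlags::kA | MyFlags::kB;
+//   if (f & MyFlags::kA) { f &= ~MyFlags::kB; }
+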
+#define GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(X) \
+ friend constexpr GrTFlagsMask<X> operator ~(X); \
+ friend constexpr X operator |(X, X); \
+ friend X& operator |=(X&, X); \
+ friend constexpr bool operator &(X, X); \
+ friend constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X>, GrTFlagsMask<X>); \
+ friend constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X>, X); \
+ friend constexpr GrTFlagsMask<X> operator|(X, GrTFlagsMask<X>); \
+ friend constexpr X operator&(GrTFlagsMask<X>, GrTFlagsMask<X>); \
+ friend constexpr X operator&(GrTFlagsMask<X>, X); \
+ friend constexpr X operator&(X, GrTFlagsMask<X>); \
+ friend X& operator &=(X&, GrTFlagsMask<X>)
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Possible 3D APIs that may be used by Ganesh.
+ */
+enum class GrBackendApi : unsigned {
+ kOpenGL,
+ kVulkan,
+ kMetal,
+ kDirect3D,
+ kDawn,
+ /**
+ * Mock is a backend that does not draw anything. It is used for unit tests
+ * and to measure CPU overhead.
+ */
+ kMock,
+
+ /**
+ * Added here to support the legacy GrBackend enum value and clients who referenced it using
+ * GrBackend::kOpenGL_GrBackend.
+ */
+ kOpenGL_GrBackend = kOpenGL,
+};
+
+/**
+ * Previously the above enum was not an enum class but a normal enum. To support the legacy use of
+ * the enum values we define them below so that no clients break.
+ */
+typedef GrBackendApi GrBackend;
+
+static constexpr GrBackendApi kMetal_GrBackend = GrBackendApi::kMetal;
+static constexpr GrBackendApi kVulkan_GrBackend = GrBackendApi::kVulkan;
+static constexpr GrBackendApi kMock_GrBackend = GrBackendApi::kMock;
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Used to say whether a texture has mip levels allocated or not.
+ */
+/** Deprecated legacy alias of skgpu::Mipmapped. */
+using GrMipmapped = skgpu::Mipmapped;
+/** Deprecated legacy alias of skgpu::Mipmapped. */
+using GrMipMapped = skgpu::Mipmapped;
+
+/*
+ * Can a GrBackendObject be rendered to?
+ */
+using GrRenderable = skgpu::Renderable;
+
+/*
+ * Used to say whether texture is backed by protected memory.
+ */
+using GrProtected = skgpu::Protected;
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * GPU SkImages and SkSurfaces can be stored such that (0, 0) in texture space may correspond to
+ * either the top-left or bottom-left content pixel.
+ */
+enum GrSurfaceOrigin : int {
+ kTopLeft_GrSurfaceOrigin,
+ kBottomLeft_GrSurfaceOrigin,
+};
+
+/**
+ * A GrContext's cache of backend context state can be partially invalidated.
+ * These enums are specific to the GL backend and we'd add a new set for an alternative backend.
+ */
+enum GrGLBackendState {
+ kRenderTarget_GrGLBackendState = 1 << 0,
+ // Also includes samplers bound to texture units.
+ kTextureBinding_GrGLBackendState = 1 << 1,
+ // View state stands for scissor and viewport
+ kView_GrGLBackendState = 1 << 2,
+ kBlend_GrGLBackendState = 1 << 3,
+ kMSAAEnable_GrGLBackendState = 1 << 4,
+ kVertex_GrGLBackendState = 1 << 5,
+ kStencil_GrGLBackendState = 1 << 6,
+ kPixelStore_GrGLBackendState = 1 << 7,
+ kProgram_GrGLBackendState = 1 << 8,
+ kFixedFunction_GrGLBackendState = 1 << 9,
+ kMisc_GrGLBackendState = 1 << 10,
+ kALL_GrGLBackendState = 0xffff
+};
+
+/**
+ * This value translates to resetting all the context state for any backend.
+ */
+static const uint32_t kAll_GrBackendState = 0xffffffff;
+
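As a usage sketch (assuming a GrDirectContext* named direct is in scope), a client that touched GL state outside of Skia would report it via GrDirectContext::resetContext:

    uint32_t dirtied = kTextureBinding_GrGLBackendState | kView_GrGLBackendState;
    direct->resetContext(dirtied);
    direct->resetContext();  // with no argument, defaults to kAll_GrBackendState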
+typedef void* GrGpuFinishedContext;
+typedef void (*GrGpuFinishedProc)(GrGpuFinishedContext finishedContext);
+
+typedef void* GrGpuSubmittedContext;
+typedef void (*GrGpuSubmittedProc)(GrGpuSubmittedContext submittedContext, bool success);
+
+typedef void* GrDirectContextDestroyedContext;
+typedef void (*GrDirectContextDestroyedProc)(GrDirectContextDestroyedContext destroyedContext);
+
+/**
+ * Struct to supply options to flush calls.
+ *
+ * After issuing all commands, fNumSemaphores semaphores will be signaled by the gpu. The client
+ * passes in an array of fNumSemaphores GrBackendSemaphores. In general these GrBackendSemaphores
+ * can be either initialized or not. If they are initialized, the backend uses the passed in
+ * semaphores. If they are not initialized, new semaphores are created and the GrBackendSemaphore
+ * objects are initialized with those semaphores. The semaphores are not sent to the GPU until the
+ * next GrContext::submit call is made. See GrContext::submit for more information.
+ *
+ * The client will own and be responsible for deleting the underlying semaphores that are stored
+ * and returned in initialized GrBackendSemaphore objects. The GrBackendSemaphore objects
+ * themselves can be deleted as soon as this function returns.
+ *
+ * If a finishedProc is provided, it will be called when all work submitted to the gpu from this
+ * flush call and all previous flush calls has finished on the GPU. If the flush call fails due to
+ * an error and nothing ends up getting sent to the GPU, the finished proc is called immediately.
+ *
+ * If a submittedProc is provided, it will be called when all work from this flush call is
+ * submitted to the GPU. If the flush call fails due to an error and nothing will get sent to the
+ * GPU, the submitted proc is called immediately. It is possible that when the work is finally
+ * submitted, the submission itself fails. In that case Skia will not reattempt the submission,
+ * and notifies the client via the success bool passed into the submittedProc. The submittedProc
+ * is useful for the client to know when semaphores that were sent with the flush have actually
+ * been submitted to the GPU, so that they can be waited on (or deleted if the submit fails).
+ * Note about GL: In GL, work gets sent to the driver immediately during the flush call, but we
+ * don't really know when the driver sends the work to the GPU. Therefore, we treat the submitted
+ * proc as we do in other backends. It will be called when the next GrContext::submit is called
+ * after the flush (or possibly during the flush if there is no work to be done for the flush).
+ * The main use case for the submittedProc is to know when semaphores have been sent to the GPU,
+ * and even in GL it is required to call GrContext::submit to flush them. So a client should be
+ * able to treat all backend APIs the same in terms of how the submitted procs are treated.
+ */
+struct GrFlushInfo {
+ size_t fNumSemaphores = 0;
+ GrBackendSemaphore* fSignalSemaphores = nullptr;
+ GrGpuFinishedProc fFinishedProc = nullptr;
+ GrGpuFinishedContext fFinishedContext = nullptr;
+ GrGpuSubmittedProc fSubmittedProc = nullptr;
+ GrGpuSubmittedContext fSubmittedContext = nullptr;
+};
+
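A minimal sketch of wiring a finished-proc through GrFlushInfo; the bool flag and its lifetime handling are illustrative only:

    static void on_gpu_finished(GrGpuFinishedContext ctx) {
        *static_cast<bool*>(ctx) = true;  // signal the caller-owned flag
    }

    bool gpuDone = false;  // must outlive the flush/submit cycle
    GrFlushInfo info;
    info.fFinishedProc = on_gpu_finished;
    info.fFinishedContext = &gpuDone;
    // direct->flush(info); direct->submit();  // assuming a GrDirectContext* direct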
+/**
+ * Enum used as the return value when flushing with semaphores, so the client knows whether the
+ * valid semaphores will be submitted on the next GrContext::submit call.
+ */
+enum class GrSemaphoresSubmitted : bool {
+ kNo = false,
+ kYes = true
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrYUVABackendTextures.h b/gfx/skia/skia/include/gpu/GrYUVABackendTextures.h
new file mode 100644
index 0000000000..edcde7e533
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrYUVABackendTextures.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrYUVABackendTextures_DEFINED
+#define GrYUVABackendTextures_DEFINED
+
+#include "include/core/SkYUVAInfo.h"
+#include "include/gpu/GrBackendSurface.h"
+
+#include <tuple>
+
+/**
+ * A description of a set of GrBackendTextures that hold the planar data described by a SkYUVAInfo.
+ */
+class SK_API GrYUVABackendTextureInfo {
+public:
+ static constexpr auto kMaxPlanes = SkYUVAInfo::kMaxPlanes;
+
+ /** Default GrYUVABackendTextureInfo is invalid. */
+ GrYUVABackendTextureInfo() = default;
+
+ /**
+ * Initializes a GrYUVABackendTextureInfo to describe a set of textures that can store the
+ * planes indicated by the SkYUVAInfo. The texture dimensions are taken from the SkYUVAInfo's
+ * plane dimensions. All the described textures share a common origin. The planar image this
+     * describes will be mipmapped if all the textures are individually mipmapped, as indicated
+     * by GrMipmapped. This will produce an invalid result (return false from isValid()) if the
+     * passed formats' channels don't agree with the SkYUVAInfo.
+ */
+ GrYUVABackendTextureInfo(const SkYUVAInfo&,
+ const GrBackendFormat[kMaxPlanes],
+ GrMipmapped,
+ GrSurfaceOrigin);
+
+ GrYUVABackendTextureInfo(const GrYUVABackendTextureInfo&) = default;
+
+ GrYUVABackendTextureInfo& operator=(const GrYUVABackendTextureInfo&) = default;
+
+ bool operator==(const GrYUVABackendTextureInfo&) const;
+ bool operator!=(const GrYUVABackendTextureInfo& that) const { return !(*this == that); }
+
+ const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; }
+
+ SkYUVColorSpace yuvColorSpace() const { return fYUVAInfo.yuvColorSpace(); }
+
+ GrMipmapped mipmapped() const { return fMipmapped; }
+
+ GrSurfaceOrigin textureOrigin() const { return fTextureOrigin; }
+
+ /** The number of SkPixmap planes, 0 if this GrYUVABackendTextureInfo is invalid. */
+ int numPlanes() const { return fYUVAInfo.numPlanes(); }
+
+ /** Format of the ith plane, or invalid format if i >= numPlanes() */
+ const GrBackendFormat& planeFormat(int i) const { return fPlaneFormats[i]; }
+
+ /**
+ * Returns true if this has been configured with a valid SkYUVAInfo with compatible texture
+ * formats.
+ */
+ bool isValid() const { return fYUVAInfo.isValid(); }
+
+ /**
+ * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be
+ * valid if this->isValid().
+ */
+ SkYUVAInfo::YUVALocations toYUVALocations() const;
+
+private:
+ SkYUVAInfo fYUVAInfo;
+ GrBackendFormat fPlaneFormats[kMaxPlanes];
+ GrMipmapped fMipmapped = GrMipmapped::kNo;
+ GrSurfaceOrigin fTextureOrigin = kTopLeft_GrSurfaceOrigin;
+};
+
+/**
+ * A set of GrBackendTextures that hold the planar data for an image described by a SkYUVAInfo.
+ */
+class SK_API GrYUVABackendTextures {
+public:
+ GrYUVABackendTextures() = default;
+ GrYUVABackendTextures(const GrYUVABackendTextures&) = delete;
+ GrYUVABackendTextures(GrYUVABackendTextures&&) = default;
+
+ GrYUVABackendTextures& operator=(const GrYUVABackendTextures&) = delete;
+ GrYUVABackendTextures& operator=(GrYUVABackendTextures&&) = default;
+
+ GrYUVABackendTextures(const SkYUVAInfo&,
+ const GrBackendTexture[SkYUVAInfo::kMaxPlanes],
+ GrSurfaceOrigin textureOrigin);
+
+ const std::array<GrBackendTexture, SkYUVAInfo::kMaxPlanes>& textures() const {
+ return fTextures;
+ }
+
+ GrBackendTexture texture(int i) const {
+ SkASSERT(i >= 0 && i < SkYUVAInfo::kMaxPlanes);
+ return fTextures[static_cast<size_t>(i)];
+ }
+
+ const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; }
+
+ int numPlanes() const { return fYUVAInfo.numPlanes(); }
+
+ GrSurfaceOrigin textureOrigin() const { return fTextureOrigin; }
+
+ bool isValid() const { return fYUVAInfo.isValid(); }
+
+ /**
+ * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be
+ * valid if this->isValid().
+ */
+ SkYUVAInfo::YUVALocations toYUVALocations() const;
+
+private:
+ SkYUVAInfo fYUVAInfo;
+ std::array<GrBackendTexture, SkYUVAInfo::kMaxPlanes> fTextures;
+ GrSurfaceOrigin fTextureOrigin = kTopLeft_GrSurfaceOrigin;
+};
+
+#endif
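A construction sketch, assuming three client-created backend textures yTex, uTex, and vTex whose dimensions match the plane dimensions implied by the SkYUVAInfo:

    SkYUVAInfo yuvaInfo({1920, 1080},
                        SkYUVAInfo::PlaneConfig::kY_U_V,
                        SkYUVAInfo::Subsampling::k420,
                        kRec601_SkYUVColorSpace);
    GrBackendTexture planes[SkYUVAInfo::kMaxPlanes] = {yTex, uTex, vTex};
    GrYUVABackendTextures yuvaTextures(yuvaInfo, planes, kTopLeft_GrSurfaceOrigin);
    SkASSERT(yuvaTextures.isValid());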
diff --git a/gfx/skia/skia/include/gpu/MutableTextureState.h b/gfx/skia/skia/include/gpu/MutableTextureState.h
new file mode 100644
index 0000000000..19b7cd54c6
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/MutableTextureState.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_MutableTextureState_DEFINED
+#define skgpu_MutableTextureState_DEFINED
+
+#include "include/gpu/GpuTypes.h"
+
+#ifdef SK_VULKAN
+#include "include/private/gpu/vk/VulkanTypesPriv.h"
+#endif
+
+#include <new>
+
+class GrVkGpu;
+
+namespace skgpu {
+
+/**
+ * Since Skia and clients can both modify gpu textures and their connected state, Skia needs a way
+ * for clients to inform us if they have modified any of this state. In order to not need setters
+ * for every single API and state, we use this class as a generic wrapper around all the mutable
+ * state. This class is used for calls that inform Skia of these texture/image state changes by the
+ * client as well as for requesting state changes to be done by Skia. The backend specific state
+ * wrapped by this class is:
+ *
+ * Vulkan: VkImageLayout and QueueFamilyIndex
+ */
+class SK_API MutableTextureState {
+public:
+ MutableTextureState() {}
+
+#ifdef SK_VULKAN
+ MutableTextureState(VkImageLayout layout, uint32_t queueFamilyIndex)
+ : fVkState(layout, queueFamilyIndex)
+ , fBackend(BackendApi::kVulkan)
+ , fIsValid(true) {}
+#endif
+
+ MutableTextureState(const MutableTextureState& that)
+ : fBackend(that.fBackend), fIsValid(that.fIsValid) {
+ if (!fIsValid) {
+ return;
+ }
+ switch (fBackend) {
+ case BackendApi::kVulkan:
+ #ifdef SK_VULKAN
+ SkASSERT(that.fBackend == BackendApi::kVulkan);
+ fVkState = that.fVkState;
+ #endif
+ break;
+ default:
+ (void)that;
+ SkUNREACHABLE;
+ }
+ }
+
+ MutableTextureState& operator=(const MutableTextureState& that) {
+ if (this != &that) {
+ this->~MutableTextureState();
+ new (this) MutableTextureState(that);
+ }
+ return *this;
+ }
+
+#ifdef SK_VULKAN
+    // If this class is not Vulkan backed it will return VK_IMAGE_LAYOUT_UNDEFINED.
+    // Otherwise it will return the VkImageLayout.
+ VkImageLayout getVkImageLayout() const {
+ if (this->isValid() && fBackend != BackendApi::kVulkan) {
+ return VK_IMAGE_LAYOUT_UNDEFINED;
+ }
+ return fVkState.getImageLayout();
+ }
+
+    // If this class is not Vulkan backed it will return VK_QUEUE_FAMILY_IGNORED.
+    // Otherwise it will return the queue family index.
+ uint32_t getQueueFamilyIndex() const {
+ if (this->isValid() && fBackend != BackendApi::kVulkan) {
+ return VK_QUEUE_FAMILY_IGNORED;
+ }
+ return fVkState.getQueueFamilyIndex();
+ }
+#endif
+
+ BackendApi backend() const { return fBackend; }
+
+ // Returns true if the backend mutable state has been initialized.
+ bool isValid() const { return fIsValid; }
+
+private:
+ friend class MutableTextureStateRef;
+ friend class ::GrVkGpu;
+
+#ifdef SK_VULKAN
+ void setVulkanState(VkImageLayout layout, uint32_t queueFamilyIndex) {
+ SkASSERT(!this->isValid() || fBackend == BackendApi::kVulkan);
+ fVkState.setImageLayout(layout);
+ fVkState.setQueueFamilyIndex(queueFamilyIndex);
+ fBackend = BackendApi::kVulkan;
+ fIsValid = true;
+ }
+#endif
+
+ union {
+ char fPlaceholder;
+#ifdef SK_VULKAN
+ VulkanMutableTextureState fVkState;
+#endif
+ };
+
+ BackendApi fBackend = BackendApi::kMock;
+ bool fIsValid = false;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_MutableTextureState_DEFINED
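A usage sketch for the Vulkan case; the commented call names the GrDirectContext entry point this state is typically passed to and is illustrative:

    #ifdef SK_VULKAN
    skgpu::MutableTextureState newState(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                        VK_QUEUE_FAMILY_IGNORED);
    // direct->setBackendTextureState(backendTexture, newState);
    #endif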
diff --git a/gfx/skia/skia/include/gpu/ShaderErrorHandler.h b/gfx/skia/skia/include/gpu/ShaderErrorHandler.h
new file mode 100644
index 0000000000..8960da5c5a
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/ShaderErrorHandler.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_ShaderErrorHandler_DEFINED
+#define skgpu_ShaderErrorHandler_DEFINED
+
+#include "include/core/SkTypes.h"
+
+namespace skgpu {
+/**
+ * Abstract class to report errors when compiling shaders.
+ */
+class SK_API ShaderErrorHandler {
+public:
+ virtual ~ShaderErrorHandler() = default;
+
+ virtual void compileError(const char* shader, const char* errors) = 0;
+
+protected:
+ ShaderErrorHandler() = default;
+ ShaderErrorHandler(const ShaderErrorHandler&) = delete;
+ ShaderErrorHandler& operator=(const ShaderErrorHandler&) = delete;
+};
+
+/**
+ * Used when no error handler is set. Will report failures via SkDebugf and asserts.
+ */
+ShaderErrorHandler* DefaultShaderErrorHandler();
+
+} // namespace skgpu
+
+#endif // skgpu_ShaderErrorHandler_DEFINED
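A minimal sketch of a concrete handler; it is installed through GrContextOptions::fShaderErrorHandler and must outlive the context that uses it:

    class LoggingShaderErrorHandler : public skgpu::ShaderErrorHandler {
    public:
        void compileError(const char* shader, const char* errors) override {
            SkDebugf("Shader compilation failed.\n%s\nErrors:\n%s\n", shader, errors);
        }
    };

    // GrContextOptions options;
    // options.fShaderErrorHandler = &loggingHandler;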
diff --git a/gfx/skia/skia/include/gpu/d3d/GrD3DBackendContext.h b/gfx/skia/skia/include/gpu/d3d/GrD3DBackendContext.h
new file mode 100644
index 0000000000..bb85e52e5c
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/d3d/GrD3DBackendContext.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrD3DBackendContext_DEFINED
+#define GrD3DBackendContext_DEFINED
+
+// GrD3DTypes.h includes d3d12.h, which in turn includes windows.h, which redefines many
+// common identifiers such as:
+// * interface
+// * small
+// * near
+// * far
+// * CreateSemaphore
+// * MemoryBarrier
+//
+// You should only include GrD3DBackendContext.h if you are prepared to rename those identifiers.
+#include "include/gpu/d3d/GrD3DTypes.h"
+
+#include "include/gpu/GrTypes.h"
+
+// The BackendContext contains all of the base D3D objects needed by the GrD3DGpu. The assumption
+// is that the client will set these up and pass them to the GrD3DGpu constructor.
+struct SK_API GrD3DBackendContext {
+ gr_cp<IDXGIAdapter1> fAdapter;
+ gr_cp<ID3D12Device> fDevice;
+ gr_cp<ID3D12CommandQueue> fQueue;
+ sk_sp<GrD3DMemoryAllocator> fMemoryAllocator;
+ GrProtected fProtectedContext = GrProtected::kNo;
+};
+
+#endif
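A sketch of filling the struct from client-created D3D12 objects (adapter, device, and queue are assumed raw COM pointers owned by the client):

    GrD3DBackendContext backendCtx;
    backendCtx.fAdapter.retain(adapter);  // share, rather than adopt, the client's refs
    backendCtx.fDevice.retain(device);
    backendCtx.fQueue.retain(queue);
    // sk_sp<GrDirectContext> direct = GrDirectContext::MakeDirect3D(backendCtx);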
diff --git a/gfx/skia/skia/include/gpu/d3d/GrD3DTypes.h b/gfx/skia/skia/include/gpu/d3d/GrD3DTypes.h
new file mode 100644
index 0000000000..b595422e86
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/d3d/GrD3DTypes.h
@@ -0,0 +1,248 @@
+
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrD3DTypes_DEFINED
+#define GrD3DTypes_DEFINED
+
+// This file includes d3d12.h, which in turn includes windows.h, which redefines many
+// common identifiers such as:
+// * interface
+// * small
+// * near
+// * far
+// * CreateSemaphore
+// * MemoryBarrier
+//
+// You should only include this header if you need the Direct3D definitions and are
+// prepared to rename those identifiers.
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GpuTypes.h"
+#include <d3d12.h>
+#include <dxgi1_4.h>
+
+class GrD3DGpu;
+
+/** Check if the argument is non-null, and if so, call obj->AddRef() and return obj. */
+template <typename T> static inline T* GrSafeComAddRef(T* obj) {
+ if (obj) {
+ obj->AddRef();
+ }
+ return obj;
+}
+
+/** Check if the argument is non-null, and if so, call obj->Release()
+ */
+template <typename T> static inline void GrSafeComRelease(T* obj) {
+ if (obj) {
+ obj->Release();
+ }
+}
+
+template <typename T> class gr_cp {
+public:
+ using element_type = T;
+
+ constexpr gr_cp() : fObject(nullptr) {}
+ constexpr gr_cp(std::nullptr_t) : fObject(nullptr) {}
+
+ /**
+ * Shares the underlying object by calling AddRef(), so that both the argument and the newly
+ * created gr_cp both have a reference to it.
+ */
+ gr_cp(const gr_cp<T>& that) : fObject(GrSafeComAddRef(that.get())) {}
+
+ /**
+ * Move the underlying object from the argument to the newly created gr_cp. Afterwards only
+ * the new gr_cp will have a reference to the object, and the argument will point to null.
+ * No call to AddRef() or Release() will be made.
+ */
+ gr_cp(gr_cp<T>&& that) : fObject(that.release()) {}
+
+ /**
+ * Adopt the bare object into the newly created gr_cp.
+ * No call to AddRef() or Release() will be made.
+ */
+ explicit gr_cp(T* obj) {
+ fObject = obj;
+ }
+
+ /**
+ * Calls Release() on the underlying object pointer.
+ */
+ ~gr_cp() {
+ GrSafeComRelease(fObject);
+ SkDEBUGCODE(fObject = nullptr);
+ }
+
+ /**
+ * Shares the underlying object referenced by the argument by calling AddRef() on it. If this
+ * gr_cp previously had a reference to an object (i.e. not null) it will call Release()
+ * on that object.
+ */
+ gr_cp<T>& operator=(const gr_cp<T>& that) {
+ if (this != &that) {
+ this->reset(GrSafeComAddRef(that.get()));
+ }
+ return *this;
+ }
+
+ /**
+ * Move the underlying object from the argument to the gr_cp. If the gr_cp
+ * previously held a reference to another object, Release() will be called on that object.
+ * No call to AddRef() will be made.
+ */
+ gr_cp<T>& operator=(gr_cp<T>&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+
+ explicit operator bool() const { return this->get() != nullptr; }
+
+ T* get() const { return fObject; }
+ T* operator->() const { return fObject; }
+ T** operator&() { return &fObject; }
+
+ /**
+ * Adopt the new object, and call Release() on any previously held object (if not null).
+ * No call to AddRef() will be made.
+ */
+ void reset(T* object = nullptr) {
+ T* oldObject = fObject;
+ fObject = object;
+ GrSafeComRelease(oldObject);
+ }
+
+ /**
+ * Shares the new object by calling AddRef() on it. If this gr_cp previously had a
+ * reference to an object (i.e. not null) it will call Release() on that object.
+ */
+ void retain(T* object) {
+ if (this->fObject != object) {
+ this->reset(GrSafeComAddRef(object));
+ }
+ }
+
+ /**
+ * Return the original object, and set the internal object to nullptr.
+ * The caller must assume ownership of the object, and manage its reference count directly.
+ * No call to Release() will be made.
+ */
+ T* SK_WARN_UNUSED_RESULT release() {
+ T* obj = fObject;
+ fObject = nullptr;
+ return obj;
+ }
+
+private:
+ T* fObject;
+};
+
+template <typename T> inline bool operator==(const gr_cp<T>& a,
+ const gr_cp<T>& b) {
+ return a.get() == b.get();
+}
+
+template <typename T> inline bool operator!=(const gr_cp<T>& a,
+ const gr_cp<T>& b) {
+ return a.get() != b.get();
+}
+
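A sketch of the intended usage pattern with a COM out-parameter API; note that operator& exposes the raw slot, so creation APIs can write directly into the gr_cp:

    gr_cp<ID3D12Fence> fence;
    // HRESULT hr = device->CreateFence(0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&fence));
    // The new object is held at ref count 1; Release() runs automatically when
    // `fence` goes out of scope or reset() is called.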
+// interface classes for the GPU memory allocator
+class GrD3DAlloc : public SkRefCnt {
+public:
+ ~GrD3DAlloc() override = default;
+};
+
+class GrD3DMemoryAllocator : public SkRefCnt {
+public:
+ virtual gr_cp<ID3D12Resource> createResource(D3D12_HEAP_TYPE, const D3D12_RESOURCE_DESC*,
+ D3D12_RESOURCE_STATES initialResourceState,
+ sk_sp<GrD3DAlloc>* allocation,
+ const D3D12_CLEAR_VALUE*) = 0;
+ virtual gr_cp<ID3D12Resource> createAliasingResource(sk_sp<GrD3DAlloc>& allocation,
+ uint64_t localOffset,
+ const D3D12_RESOURCE_DESC*,
+ D3D12_RESOURCE_STATES initialResourceState,
+ const D3D12_CLEAR_VALUE*) = 0;
+};
+
+// Note: there is no notion of Borrowed or Adopted resources in the D3D backend,
+// so Ganesh will ref fResource once it's asked to wrap it.
+// Clients are responsible for releasing their own ref to avoid memory leaks.
+struct GrD3DTextureResourceInfo {
+ gr_cp<ID3D12Resource> fResource = nullptr;
+ sk_sp<GrD3DAlloc> fAlloc = nullptr;
+ D3D12_RESOURCE_STATES fResourceState = D3D12_RESOURCE_STATE_COMMON;
+ DXGI_FORMAT fFormat = DXGI_FORMAT_UNKNOWN;
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ unsigned int fSampleQualityPattern = DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ GrD3DTextureResourceInfo() = default;
+
+ GrD3DTextureResourceInfo(ID3D12Resource* resource,
+ const sk_sp<GrD3DAlloc> alloc,
+ D3D12_RESOURCE_STATES resourceState,
+ DXGI_FORMAT format,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ unsigned int sampleQualityLevel,
+ skgpu::Protected isProtected = skgpu::Protected::kNo)
+ : fResource(resource)
+ , fAlloc(alloc)
+ , fResourceState(resourceState)
+ , fFormat(format)
+ , fSampleCount(sampleCount)
+ , fLevelCount(levelCount)
+ , fSampleQualityPattern(sampleQualityLevel)
+ , fProtected(isProtected) {}
+
+ GrD3DTextureResourceInfo(const GrD3DTextureResourceInfo& info,
+ D3D12_RESOURCE_STATES resourceState)
+ : fResource(info.fResource)
+ , fAlloc(info.fAlloc)
+ , fResourceState(resourceState)
+ , fFormat(info.fFormat)
+ , fSampleCount(info.fSampleCount)
+ , fLevelCount(info.fLevelCount)
+ , fSampleQualityPattern(info.fSampleQualityPattern)
+ , fProtected(info.fProtected) {}
+
+#if GR_TEST_UTILS
+ bool operator==(const GrD3DTextureResourceInfo& that) const {
+ return fResource == that.fResource && fResourceState == that.fResourceState &&
+ fFormat == that.fFormat && fSampleCount == that.fSampleCount &&
+ fLevelCount == that.fLevelCount &&
+ fSampleQualityPattern == that.fSampleQualityPattern && fProtected == that.fProtected;
+ }
+#endif
+};
+
+struct GrD3DFenceInfo {
+ GrD3DFenceInfo()
+ : fFence(nullptr)
+ , fValue(0) {
+ }
+
+ gr_cp<ID3D12Fence> fFence;
+ uint64_t fValue; // signal value for the fence
+};
+
+struct GrD3DSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ DXGI_FORMAT fFormat = DXGI_FORMAT_UNKNOWN;
+ unsigned int fSampleQualityPattern = DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h b/gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h
new file mode 100644
index 0000000000..fbd3dbaf55
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnTypes_DEFINED
+#define GrDawnTypes_DEFINED
+
+#include "include/gpu/GpuTypes.h"
+
+#ifdef Always
+#undef Always
+static constexpr int Always = 2;
+#endif
+#ifdef Success
+#undef Success
+static constexpr int Success = 0;
+#endif
+#ifdef None
+#undef None
+static constexpr int None = 0L;
+#endif
+#include "webgpu/webgpu_cpp.h" // IWYU pragma: export
+
+struct GrDawnTextureInfo {
+ wgpu::Texture fTexture;
+ wgpu::TextureFormat fFormat;
+ uint32_t fLevelCount;
+ GrDawnTextureInfo() : fTexture(nullptr), fFormat(), fLevelCount(0) {
+ }
+ GrDawnTextureInfo(const GrDawnTextureInfo& other)
+ : fTexture(other.fTexture)
+ , fFormat(other.fFormat)
+ , fLevelCount(other.fLevelCount) {
+ }
+ GrDawnTextureInfo& operator=(const GrDawnTextureInfo& other) {
+ fTexture = other.fTexture;
+ fFormat = other.fFormat;
+ fLevelCount = other.fLevelCount;
+ return *this;
+ }
+ bool operator==(const GrDawnTextureInfo& other) const {
+ return fTexture.Get() == other.fTexture.Get() &&
+ fFormat == other.fFormat &&
+ fLevelCount == other.fLevelCount;
+ }
+};
+
+// GrDawnRenderTargetInfo holds a reference to a (1-mip) TextureView. This means that, for now,
+// GrDawnRenderTarget is suitable for rendering, but not readPixels() or writePixels(). Also,
+// backdrop filters and certain blend modes that require copying the destination framebuffer
+// will not work.
+struct GrDawnRenderTargetInfo {
+ wgpu::TextureView fTextureView;
+ wgpu::TextureFormat fFormat;
+ uint32_t fLevelCount;
+ GrDawnRenderTargetInfo() : fTextureView(nullptr), fFormat(), fLevelCount(0) {
+ }
+ GrDawnRenderTargetInfo(const GrDawnRenderTargetInfo& other)
+ : fTextureView(other.fTextureView)
+ , fFormat(other.fFormat)
+ , fLevelCount(other.fLevelCount) {
+ }
+ explicit GrDawnRenderTargetInfo(const GrDawnTextureInfo& texInfo)
+ : fFormat(texInfo.fFormat)
+ , fLevelCount(1) {
+ wgpu::TextureViewDescriptor desc;
+ desc.format = texInfo.fFormat;
+ desc.mipLevelCount = 1;
+ fTextureView = texInfo.fTexture.CreateView(&desc);
+ }
+ GrDawnRenderTargetInfo& operator=(const GrDawnRenderTargetInfo& other) {
+ fTextureView = other.fTextureView;
+ fFormat = other.fFormat;
+ fLevelCount = other.fLevelCount;
+ return *this;
+ }
+ bool operator==(const GrDawnRenderTargetInfo& other) const {
+ return fTextureView.Get() == other.fTextureView.Get() &&
+ fFormat == other.fFormat &&
+ fLevelCount == other.fLevelCount;
+ }
+};
+
+struct GrDawnSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ wgpu::TextureFormat fFormat;
+};
+
+#endif
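A wrapping sketch, assuming an existing wgpu::Texture named tex:

    GrDawnTextureInfo texInfo;
    texInfo.fTexture = tex;
    texInfo.fFormat = wgpu::TextureFormat::RGBA8Unorm;
    texInfo.fLevelCount = 1;
    GrDawnRenderTargetInfo rtInfo(texInfo);  // creates the 1-mip TextureView noted above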
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h b/gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h
new file mode 100644
index 0000000000..bfa2aea376
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+
+void GrGetEGLQueryAndDisplay(GrEGLQueryStringFn** queryString, GrEGLDisplay* display,
+ void* ctx, GrGLGetProc get);
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h b/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h
new file mode 100644
index 0000000000..4f9f9f9ee0
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/gl/GrGLInterface.h"
+
+typedef GrGLFuncPtr (*GrGLGetProc)(void* ctx, const char name[]);
+
+/**
+ * Generic function for creating a GrGLInterface for either an OpenGL or GLES context. It calls
+ * get() to get each function address. ctx is a generic ptr passed to and interpreted by get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledInterface(void *ctx, GrGLGetProc get);
+
+/**
+ * Generic function for creating a GrGLInterface for an OpenGL (but not GLES) context. It calls
+ * get() to get each function address. ctx is a generic ptr passed to and interpreted by get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledGLInterface(void *ctx, GrGLGetProc get);
+
+/**
+ * Generic function for creating a GrGLInterface for an OpenGL ES (but not OpenGL) context. It
+ * calls get() to get each function address. ctx is a generic ptr passed to and interpreted by
+ * get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledGLESInterface(void *ctx, GrGLGetProc get);
+
+/**
+ * Generic function for creating a GrGLInterface for a WebGL (similar to OpenGL ES) context. It
+ * calls get() to get each function address. ctx is a generic ptr passed to and interpreted by
+ * get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledWebGLInterface(void *ctx, GrGLGetProc get);
+
+/** Deprecated version of GrGLMakeAssembledInterface() that returns a bare pointer. */
+SK_API const GrGLInterface* GrGLAssembleInterface(void *ctx, GrGLGetProc get);
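A sketch of the GetProc pattern using EGL as the loader; it assumes an EGL context is current on the calling thread (some EGL implementations only resolve extension functions this way):

    static GrGLFuncPtr egl_get_proc(void* ctx, const char name[]) {
        return reinterpret_cast<GrGLFuncPtr>(eglGetProcAddress(name));
    }

    // sk_sp<const GrGLInterface> iface = GrGLMakeAssembledInterface(nullptr, egl_get_proc);
    // sk_sp<GrDirectContext> direct = GrDirectContext::MakeGL(iface);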
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLConfig.h b/gfx/skia/skia/include/gpu/gl/GrGLConfig.h
new file mode 100644
index 0000000000..e3573486ca
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLConfig.h
@@ -0,0 +1,79 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGLConfig_DEFINED
+#define GrGLConfig_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+/**
+ * Optional GL config file.
+ */
+#ifdef GR_GL_CUSTOM_SETUP_HEADER
+ #include GR_GL_CUSTOM_SETUP_HEADER
+#endif
+
+#if !defined(GR_GL_FUNCTION_TYPE)
+ #if defined(SK_BUILD_FOR_WIN)
+ #define GR_GL_FUNCTION_TYPE __stdcall
+ #else
+ #define GR_GL_FUNCTION_TYPE
+ #endif
+#endif
+
+/**
+ * The following are optional defines that can be enabled at the compiler
+ * command line, in a IDE project, in a GrUserConfig.h file, or in a GL custom
+ * file (if one is in use). If a GR_GL_CUSTOM_SETUP_HEADER is used they can
+ * also be placed there.
+ *
+ * GR_GL_LOG_CALLS: if 1 Gr can print every GL call using SkDebugf. Defaults to
+ * 0. Logging can be enabled and disabled at runtime using a debugger via the
+ * global gLogCallsGL. The initial value of gLogCallsGL is controlled by
+ * GR_GL_LOG_CALLS_START.
+ *
+ * GR_GL_LOG_CALLS_START: controls the initial value of gLogCallsGL when
+ * GR_GL_LOG_CALLS is 1. Defaults to 0.
+ *
+ * GR_GL_CHECK_ERROR: if enabled Gr can do a glGetError() after every GL call.
+ * Defaults to 1 if SK_DEBUG is set, otherwise 0. When GR_GL_CHECK_ERROR is 1
+ * this can be toggled in a debugger using the gCheckErrorGL global. The initial
+ * value of gCheckErrorGL is controlled by GR_GL_CHECK_ERROR_START.
+ *
+ * GR_GL_CHECK_ERROR_START: controls the initial value of gCheckErrorGL
+ * when GR_GL_CHECK_ERROR is 1. Defaults to 1.
+ *
+ */
+
+#if !defined(GR_GL_LOG_CALLS)
+ #ifdef SK_DEBUG
+ #define GR_GL_LOG_CALLS 1
+ #else
+ #define GR_GL_LOG_CALLS 0
+ #endif
+#endif
+
+#if !defined(GR_GL_LOG_CALLS_START)
+ #define GR_GL_LOG_CALLS_START 0
+#endif
+
+#if !defined(GR_GL_CHECK_ERROR)
+ #ifdef SK_DEBUG
+ #define GR_GL_CHECK_ERROR 1
+ #else
+ #define GR_GL_CHECK_ERROR 0
+ #endif
+#endif
+
+#if !defined(GR_GL_CHECK_ERROR_START)
+ #define GR_GL_CHECK_ERROR_START 1
+#endif
+
+#endif
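For example, a client that wants call logging enabled from startup in all builds could add the following to its compiler flags or to its GR_GL_CUSTOM_SETUP_HEADER:

    #define GR_GL_LOG_CALLS 1
    #define GR_GL_LOG_CALLS_START 1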
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h b/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h
new file mode 100644
index 0000000000..40127d1704
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h
@@ -0,0 +1,14 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrGLConfig_chrome_DEFINED
+#define GrGLConfig_chrome_DEFINED
+
+// glGetError() forces a sync with gpu process on chrome
+#define GR_GL_CHECK_ERROR_START 0
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h b/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h
new file mode 100644
index 0000000000..dfa83e1962
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLExtensions_DEFINED
+#define GrGLExtensions_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/gpu/gl/GrGLFunctions.h"
+#include "include/private/base/SkTArray.h"
+
+#include <utility>
+
+struct GrGLInterface;
+class SkJSONWriter;
+
+/**
+ * This helper queries the current GL context for its extensions, remembers them, and can be
+ * queried. It supports both glGetString- and glGetStringi-style extension string APIs and will
+ * use the latter if it is available. It will also query for EGL extensions if an eglQueryString
+ * implementation is provided.
+ */
+class SK_API GrGLExtensions {
+public:
+ GrGLExtensions() {}
+
+ GrGLExtensions(const GrGLExtensions&);
+
+ GrGLExtensions& operator=(const GrGLExtensions&);
+
+ void swap(GrGLExtensions* that) {
+ using std::swap;
+ swap(fStrings, that->fStrings);
+ swap(fInitialized, that->fInitialized);
+ }
+
+ /**
+ * We sometimes need to use this class without having yet created a GrGLInterface. This version
+ * of init expects that getString is always non-NULL while getIntegerv and getStringi are non-
+ * NULL if on desktop GL with version 3.0 or higher. Otherwise it will fail.
+ */
+ bool init(GrGLStandard standard,
+ GrGLFunction<GrGLGetStringFn> getString,
+ GrGLFunction<GrGLGetStringiFn> getStringi,
+ GrGLFunction<GrGLGetIntegervFn> getIntegerv,
+ GrGLFunction<GrEGLQueryStringFn> queryString = nullptr,
+ GrEGLDisplay eglDisplay = nullptr);
+
+ bool isInitialized() const { return fInitialized; }
+
+ /**
+ * Queries whether an extension is present. This will fail if init() has not been called.
+ */
+ bool has(const char[]) const;
+
+ /**
+ * Removes an extension if present. Returns true if the extension was present before the call.
+ */
+ bool remove(const char[]);
+
+ /**
+     * Adds an extension to the list.
+ */
+ void add(const char[]);
+
+ void reset() { fStrings.clear(); }
+
+ void dumpJSON(SkJSONWriter*) const;
+
+private:
+ bool fInitialized = false;
+ SkTArray<SkString> fStrings;
+};
+
+#endif
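A sketch of the standalone-init path described above; the three function pointers are assumed to have been resolved by the client's own loader:

    GrGLExtensions extensions;
    bool ok = extensions.init(kGLES_GrGLStandard, getStringPtr, getStringiPtr,
                              getIntegervPtr);
    if (ok && extensions.has("GL_OES_EGL_image")) {
        // safe to rely on the EGLImage entry points
    }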
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h b/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h
new file mode 100644
index 0000000000..4e488abcad
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h
@@ -0,0 +1,307 @@
+
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLFunctions_DEFINED
+#define GrGLFunctions_DEFINED
+
+#include <cstring>
+#include "include/gpu/gl/GrGLTypes.h"
+#include "include/private/base/SkTLogic.h"
+
+
+extern "C" {
+
+///////////////////////////////////////////////////////////////////////////////
+
+using GrGLActiveTextureFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum texture);
+using GrGLAttachShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint shader);
+using GrGLBeginQueryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint id);
+using GrGLBindAttribLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint index, const char* name);
+using GrGLBindBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint buffer);
+using GrGLBindFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint framebuffer);
+using GrGLBindRenderbufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint renderbuffer);
+using GrGLBindTextureFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint texture);
+using GrGLBindFragDataLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint colorNumber, const GrGLchar* name);
+using GrGLBindFragDataLocationIndexedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint colorNumber, GrGLuint index, const GrGLchar* name);
+using GrGLBindSamplerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint unit, GrGLuint sampler);
+using GrGLBindVertexArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint array);
+using GrGLBlendBarrierFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLBlendColorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha);
+using GrGLBlendEquationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLBlendFuncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum sfactor, GrGLenum dfactor);
+using GrGLBlitFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint srcX0, GrGLint srcY0, GrGLint srcX1, GrGLint srcY1, GrGLint dstX0, GrGLint dstY0, GrGLint dstX1, GrGLint dstY1, GrGLbitfield mask, GrGLenum filter);
+using GrGLBufferDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizeiptr size, const GrGLvoid* data, GrGLenum usage);
+using GrGLBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr size, const GrGLvoid* data);
+using GrGLCheckFramebufferStatusFn = GrGLenum GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLClearFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLbitfield mask);
+using GrGLClearColorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha);
+using GrGLClearStencilFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint s);
+using GrGLClearTexImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLenum format, GrGLenum type, const GrGLvoid* data);
+using GrGLClearTexSubImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLenum format, GrGLenum type, const GrGLvoid* data);
+using GrGLColorMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLboolean red, GrGLboolean green, GrGLboolean blue, GrGLboolean alpha);
+using GrGLCompileShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader);
+using GrGLCompressedTexImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLenum internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLsizei imageSize, const GrGLvoid* data);
+using GrGLCompressedTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLsizei imageSize, const GrGLvoid* data);
+using GrGLCopyBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum readTarget, GrGLenum writeTarget, GrGLintptr readOffset, GrGLintptr writeOffset, GrGLsizeiptr size);
+using GrGLCopyTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+using GrGLCreateProgramFn = GrGLuint GR_GL_FUNCTION_TYPE();
+using GrGLCreateShaderFn = GrGLuint GR_GL_FUNCTION_TYPE(GrGLenum type);
+using GrGLCullFaceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLDeleteBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* buffers);
+using GrGLDeleteFencesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* fences);
+using GrGLDeleteFramebuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* framebuffers);
+using GrGLDeleteProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program);
+using GrGLDeleteQueriesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* ids);
+using GrGLDeleteRenderbuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* renderbuffers);
+using GrGLDeleteSamplersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei count, const GrGLuint* samplers);
+using GrGLDeleteShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader);
+using GrGLDeleteTexturesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* textures);
+using GrGLDeleteVertexArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* arrays);
+using GrGLDepthMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLboolean flag);
+using GrGLDisableFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum cap);
+using GrGLDisableVertexAttribArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index);
+using GrGLDrawArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLint first, GrGLsizei count);
+using GrGLDrawArraysInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLint first, GrGLsizei count, GrGLsizei primcount);
+using GrGLDrawArraysIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLvoid* indirect);
+using GrGLDrawBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLDrawBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLenum* bufs);
+using GrGLDrawElementsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid* indices);
+using GrGLDrawElementsInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid* indices, GrGLsizei primcount);
+using GrGLDrawElementsIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLenum type, const GrGLvoid* indirect);
+using GrGLDrawRangeElementsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLuint start, GrGLuint end, GrGLsizei count, GrGLenum type, const GrGLvoid* indices);
+using GrGLEnableFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum cap);
+using GrGLEnableVertexAttribArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index);
+using GrGLEndQueryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLFinishFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLFinishFenceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint fence);
+using GrGLFlushFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLFlushMappedBufferRangeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length);
+using GrGLFramebufferRenderbufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum renderbuffertarget, GrGLuint renderbuffer);
+using GrGLFramebufferTexture2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level);
+using GrGLFramebufferTexture2DMultisampleFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level, GrGLsizei samples);
+using GrGLFrontFaceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLGenBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* buffers);
+using GrGLGenFencesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* fences);
+using GrGLGenFramebuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* framebuffers);
+using GrGLGenerateMipmapFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLGenQueriesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* ids);
+using GrGLGenRenderbuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* renderbuffers);
+using GrGLGenSamplersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei count, GrGLuint* samplers);
+using GrGLGenTexturesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* textures);
+using GrGLGenVertexArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* arrays);
+using GrGLGetBufferParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint* params);
+using GrGLGetErrorFn = GrGLenum GR_GL_FUNCTION_TYPE();
+using GrGLGetFramebufferAttachmentParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum pname, GrGLint* params);
+using GrGLGetFloatvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLfloat* params);
+using GrGLGetIntegervFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLint* params);
+using GrGLGetMultisamplefvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLuint index, GrGLfloat* val);
+using GrGLGetProgramBinaryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLsizei bufsize, GrGLsizei* length, GrGLenum* binaryFormat, void* binary);
+using GrGLGetProgramInfoLogFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLsizei bufsize, GrGLsizei* length, char* infolog);
+using GrGLGetProgramivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum pname, GrGLint* params);
+using GrGLGetQueryivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum GLtarget, GrGLenum pname, GrGLint* params);
+using GrGLGetQueryObjecti64vFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLint64* params);
+using GrGLGetQueryObjectivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLint* params);
+using GrGLGetQueryObjectui64vFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLuint64* params);
+using GrGLGetQueryObjectuivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLuint* params);
+using GrGLGetRenderbufferParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint* params);
+using GrGLGetShaderInfoLogFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, GrGLsizei bufsize, GrGLsizei* length, char* infolog);
+using GrGLGetShaderivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, GrGLenum pname, GrGLint* params);
+using GrGLGetShaderPrecisionFormatFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum shadertype, GrGLenum precisiontype, GrGLint* range, GrGLint* precision);
+using GrGLGetStringFn = const GrGLubyte* GR_GL_FUNCTION_TYPE(GrGLenum name);
+using GrGLGetStringiFn = const GrGLubyte* GR_GL_FUNCTION_TYPE(GrGLenum name, GrGLuint index);
+using GrGLGetTexLevelParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLenum pname, GrGLint* params);
+using GrGLGetUniformLocationFn = GrGLint GR_GL_FUNCTION_TYPE(GrGLuint program, const char* name);
+using GrGLInsertEventMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei length, const char* marker);
+using GrGLInvalidateBufferDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint buffer);
+using GrGLInvalidateBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr length);
+using GrGLInvalidateFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments);
+using GrGLInvalidateSubFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+using GrGLInvalidateTexImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level);
+using GrGLInvalidateTexSubImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth);
+using GrGLIsTextureFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLuint texture);
+using GrGLLineWidthFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLfloat width);
+using GrGLLinkProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program);
+using GrGLMapBufferFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum access);
+using GrGLMapBufferRangeFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length, GrGLbitfield access);
+using GrGLMapBufferSubDataFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLuint target, GrGLintptr offset, GrGLsizeiptr size, GrGLenum access);
+using GrGLMapTexSubImage2DFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLenum access);
+using GrGLMemoryBarrierFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLbitfield barriers);
+using GrGLPatchParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLint value);
+using GrGLPixelStoreiFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLint param);
+using GrGLPolygonModeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum mode);
+using GrGLPopGroupMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLProgramBinaryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum binaryFormat, void* binary, GrGLsizei length);
+using GrGLProgramParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum pname, GrGLint value);
+using GrGLPushGroupMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei length, const char* marker);
+using GrGLQueryCounterFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum target);
+using GrGLReadBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum src);
+using GrGLReadPixelsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLvoid* pixels);
+using GrGLRenderbufferStorageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+using GrGLRenderbufferStorageMultisampleFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei samples, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+using GrGLResolveMultisampleFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLSamplerParameterfFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint sampler, GrGLenum pname, GrGLfloat param);
+using GrGLSamplerParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint sampler, GrGLenum pname, GrGLint param);
+using GrGLSamplerParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint sampler, GrGLenum pname, const GrGLint* params);
+using GrGLScissorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+// GL_CHROMIUM_bind_uniform_location
+using GrGLBindUniformLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLint location, const char* name);
+using GrGLSetFenceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint fence, GrGLenum condition);
+using GrGLShaderSourceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, GrGLsizei count, const char* const* str, const GrGLint* length);
+using GrGLStencilFuncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum func, GrGLint ref, GrGLuint mask);
+using GrGLStencilFuncSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum func, GrGLint ref, GrGLuint mask);
+using GrGLStencilMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint mask);
+using GrGLStencilMaskSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLuint mask);
+using GrGLStencilOpFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum fail, GrGLenum zfail, GrGLenum zpass);
+using GrGLStencilOpSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum fail, GrGLenum zfail, GrGLenum zpass);
+using GrGLTexBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLuint buffer);
+using GrGLTexBufferRangeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr size);
+using GrGLTexImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid* pixels);
+using GrGLTexParameterfFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLfloat param);
+using GrGLTexParameterfvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, const GrGLfloat* params);
+using GrGLTexParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint param);
+using GrGLTexParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, const GrGLint* params);
+using GrGLTexStorage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei levels, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+using GrGLDiscardFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments);
+using GrGLTestFenceFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLuint fence);
+using GrGLTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, const GrGLvoid* pixels);
+using GrGLTextureBarrierFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLUniform1fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0);
+using GrGLUniform1iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0);
+using GrGLUniform1fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform1ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniform2fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1);
+using GrGLUniform2iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1);
+using GrGLUniform2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform2ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniform3fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2);
+using GrGLUniform3iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2);
+using GrGLUniform3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform3ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniform4fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2, GrGLfloat v3);
+using GrGLUniform4iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2, GrGLint v3);
+using GrGLUniform4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform4ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniformMatrix2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+using GrGLUniformMatrix3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+using GrGLUniformMatrix4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+using GrGLUnmapBufferFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLUnmapBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(const GrGLvoid* mem);
+using GrGLUnmapTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(const GrGLvoid* mem);
+using GrGLUseProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program);
+using GrGLVertexAttrib1fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat value);
+using GrGLVertexAttrib2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values);
+using GrGLVertexAttrib3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values);
+using GrGLVertexAttrib4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values);
+using GrGLVertexAttribDivisorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index, GrGLuint divisor);
+using GrGLVertexAttribIPointerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, GrGLint size, GrGLenum type, GrGLsizei stride, const GrGLvoid* ptr);
+using GrGLVertexAttribPointerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, GrGLint size, GrGLenum type, GrGLboolean normalized, GrGLsizei stride, const GrGLvoid* ptr);
+using GrGLViewportFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+
+/* GL_NV_framebuffer_mixed_samples */
+using GrGLCoverageModulationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum components);
+
+/* EXT_base_instance */
+using GrGLDrawArraysInstancedBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLint first, GrGLsizei count, GrGLsizei instancecount, GrGLuint baseinstance);
+using GrGLDrawElementsInstancedBaseVertexBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, GrGLenum type, const void *indices, GrGLsizei instancecount, GrGLint basevertex, GrGLuint baseinstance);
+
+/* EXT_multi_draw_indirect */
+using GrGLMultiDrawArraysIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLvoid* indirect, GrGLsizei drawcount, GrGLsizei stride);
+using GrGLMultiDrawElementsIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLenum type, const GrGLvoid* indirect, GrGLsizei drawcount, GrGLsizei stride);
+
+/* ANGLE_base_vertex_base_instance */
+using GrGLMultiDrawArraysInstancedBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLint* firsts, const GrGLsizei* counts, const GrGLsizei* instanceCounts, const GrGLuint* baseInstances, const GrGLsizei drawcount);
+using GrGLMultiDrawElementsInstancedBaseVertexBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLint* counts, GrGLenum type, const GrGLvoid* const* indices, const GrGLsizei* instanceCounts, const GrGLint* baseVertices, const GrGLuint* baseInstances, const GrGLsizei drawcount);
+
+/* ARB_sync */
+using GrGLFenceSyncFn = GrGLsync GR_GL_FUNCTION_TYPE(GrGLenum condition, GrGLbitfield flags);
+using GrGLIsSyncFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLsync sync);
+using GrGLClientWaitSyncFn = GrGLenum GR_GL_FUNCTION_TYPE(GrGLsync sync, GrGLbitfield flags, GrGLuint64 timeout);
+using GrGLWaitSyncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsync sync, GrGLbitfield flags, GrGLuint64 timeout);
+using GrGLDeleteSyncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsync sync);
+
+/* ARB_internalformat_query */
+using GrGLGetInternalformativFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLenum pname, GrGLsizei bufSize, GrGLint* params);
+
+/* KHR_debug */
+using GrGLDebugMessageControlFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLenum type, GrGLenum severity, GrGLsizei count, const GrGLuint* ids, GrGLboolean enabled);
+using GrGLDebugMessageInsertFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLenum type, GrGLuint id, GrGLenum severity, GrGLsizei length, const GrGLchar* buf);
+using GrGLDebugMessageCallbackFn = GrGLvoid GR_GL_FUNCTION_TYPE(GRGLDEBUGPROC callback, const GrGLvoid* userParam);
+using GrGLGetDebugMessageLogFn = GrGLuint GR_GL_FUNCTION_TYPE(GrGLuint count, GrGLsizei bufSize, GrGLenum* sources, GrGLenum* types, GrGLuint* ids, GrGLenum* severities, GrGLsizei* lengths, GrGLchar* messageLog);
+using GrGLPushDebugGroupFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLuint id, GrGLsizei length, const GrGLchar* message);
+using GrGLPopDebugGroupFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLObjectLabelFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum identifier, GrGLuint name, GrGLsizei length, const GrGLchar* label);
+
+/** EXT_window_rectangles */
+using GrGLWindowRectanglesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, const GrGLint box[]);
+
+/** GL_QCOM_tiled_rendering */
+using GrGLStartTilingFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint x, GrGLuint y, GrGLuint width, GrGLuint height, GrGLbitfield preserveMask);
+using GrGLEndTilingFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLbitfield preserveMask);
+
+/** EGL functions */
+using GrEGLQueryStringFn = const char* GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLint name);
+using GrEGLGetCurrentDisplayFn = GrEGLDisplay GR_GL_FUNCTION_TYPE();
+using GrEGLCreateImageFn = GrEGLImage GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLContext ctx, GrEGLenum target, GrEGLClientBuffer buffer, const GrEGLint* attrib_list);
+using GrEGLDestroyImageFn = GrEGLBoolean GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLImage image);
+} // extern "C"
+
+// This is a lighter-weight std::function, trying to reduce code size and compile time
+// by only supporting the exact use cases we require.
+template <typename T> class GrGLFunction;
+
+template <typename R, typename... Args>
+class GrGLFunction<R GR_GL_FUNCTION_TYPE(Args...)> {
+public:
+ using Fn = R GR_GL_FUNCTION_TYPE(Args...);
+ // Construct empty.
+ GrGLFunction() = default;
+ GrGLFunction(std::nullptr_t) {}
+
+ // Construct from a simple function pointer.
+ GrGLFunction(Fn* fn_ptr) {
+ static_assert(sizeof(fn_ptr) <= sizeof(fBuf), "fBuf is too small");
+ if (fn_ptr) {
+ memcpy(fBuf, &fn_ptr, sizeof(fn_ptr));
+ fCall = [](const void* buf, Args... args) {
+ return (*(Fn**)buf)(std::forward<Args>(args)...);
+ };
+ }
+ }
+
+ // Construct from a small closure.
+ template <typename Closure>
+ GrGLFunction(Closure closure) : GrGLFunction() {
+ static_assert(sizeof(Closure) <= sizeof(fBuf), "fBuf is too small");
+#if defined(__APPLE__) // I am having serious trouble getting these to work with all STLs...
+ static_assert(std::is_trivially_copyable<Closure>::value, "");
+ static_assert(std::is_trivially_destructible<Closure>::value, "");
+#endif
+
+ memcpy(fBuf, &closure, sizeof(closure));
+ fCall = [](const void* buf, Args... args) {
+ auto closure = (const Closure*)buf;
+ return (*closure)(args...);
+ };
+ }
+
+ R operator()(Args... args) const {
+ SkASSERT(fCall);
+ return fCall(fBuf, std::forward<Args>(args)...);
+ }
+
+ explicit operator bool() const { return fCall != nullptr; }
+
+ void reset() { fCall = nullptr; }
+
+private:
+ using Call = R(const void* buf, Args...);
+ Call* fCall = nullptr;
+ size_t fBuf[4];
+};
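+
+// A minimal usage sketch (illustrative only; 'loader' is a hypothetical source
+// of GL entry points, and any closure must stay small and trivially copyable):
+//
+//     GrGLFunction<GrGLClearFn> clear = loader.getClearFnPtr();   // plain pointer
+//     GrGLFunction<GrGLClearFn> clear2 =
+//             [](GrGLbitfield mask) { /* dispatch through a table, etc. */ };
+//     if (clear) {
+//         clear(0x4000 /* GL_COLOR_BUFFER_BIT */);
+//     }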
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLInterface.h b/gfx/skia/skia/include/gpu/gl/GrGLInterface.h
new file mode 100644
index 0000000000..64ca419b9b
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLInterface.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLInterface_DEFINED
+#define GrGLInterface_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/gl/GrGLExtensions.h"
+#include "include/gpu/gl/GrGLFunctions.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+typedef void(*GrGLFuncPtr)();
+struct GrGLInterface;
+
+
+/**
+ * Rather than depend on platform-specific GL headers and libraries, we require
+ * the client to provide a struct of GL function pointers. This struct can be
+ * specified per-GrContext as a parameter to GrContext::MakeGL. If no interface is
+ * passed to MakeGL then a default GL interface is created using GrGLMakeNativeInterface().
+ * If this returns nullptr then GrContext::MakeGL() will fail.
+ *
+ * The implementation of GrGLMakeNativeInterface is platform-specific. Several
+ * implementations have been provided (for GLX, WGL, EGL, etc), along with an
+ * implementation that simply returns nullptr. Clients should select the most
+ * appropriate one to build.
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeNativeInterface();
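+
+// A sketch of the flow described above (error handling elided; the MakeGL
+// factory is declared alongside the context classes, see GrDirectContext.h):
+//
+//     sk_sp<const GrGLInterface> iface = GrGLMakeNativeInterface();
+//     if (!iface || !iface->validate()) { /* no usable GL interface */ }
+//     // Otherwise pass 'iface' to the MakeGL context factory.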
+
+/**
+ * GrContext uses the following interface to make all calls into OpenGL. When a
+ * GrContext is created it is given a GrGLInterface. The interface's function
+ * pointers must be valid for the OpenGL context associated with the GrContext.
+ * On some platforms, such as Windows, function pointers for OpenGL extensions
+ * may vary between OpenGL contexts. So the caller must be careful to use a
+ * GrGLInterface initialized for the correct context. All functions that should
+ * be available based on the OpenGL's version and extension string must be
+ * non-NULL or GrContext creation will fail. This can be tested with the
+ * validate() method when the OpenGL context has been made current.
+ */
+struct SK_API GrGLInterface : public SkRefCnt {
+private:
+ using INHERITED = SkRefCnt;
+
+#if GR_GL_CHECK_ERROR
+ // This is here to avoid having our debug code that checks for a GL error after most GL calls
+ // accidentally swallow an OOM that should be reported.
+ mutable bool fOOMed = false;
+ bool fSuppressErrorLogging = false;
+#endif
+
+public:
+ GrGLInterface();
+
+ // Validates that the GrGLInterface supports its advertised standard. This means the necessary
+ // function pointers have been initialized for both the GL version and any advertised
+ // extensions.
+ bool validate() const;
+
+#if GR_GL_CHECK_ERROR
+ GrGLenum checkError(const char* location, const char* call) const;
+ bool checkAndResetOOMed() const;
+ void suppressErrorLogging();
+#endif
+
+#if GR_TEST_UTILS
+ GrGLInterface(const GrGLInterface& that)
+ : fStandard(that.fStandard)
+ , fExtensions(that.fExtensions)
+ , fFunctions(that.fFunctions) {}
+#endif
+
+ // Indicates the type of GL implementation
+ union {
+ GrGLStandard fStandard;
+        GrGLStandard fBindingsExported; // Legacy name; will be removed once Chromium is updated.
+ };
+
+ GrGLExtensions fExtensions;
+
+ bool hasExtension(const char ext[]) const { return fExtensions.has(ext); }
+
+ /**
+ * The function pointers are in a struct so that we can have a compiler generated assignment
+ * operator.
+ */
+ struct Functions {
+ GrGLFunction<GrGLActiveTextureFn> fActiveTexture;
+ GrGLFunction<GrGLAttachShaderFn> fAttachShader;
+ GrGLFunction<GrGLBeginQueryFn> fBeginQuery;
+ GrGLFunction<GrGLBindAttribLocationFn> fBindAttribLocation;
+ GrGLFunction<GrGLBindBufferFn> fBindBuffer;
+ GrGLFunction<GrGLBindFragDataLocationFn> fBindFragDataLocation;
+ GrGLFunction<GrGLBindFragDataLocationIndexedFn> fBindFragDataLocationIndexed;
+ GrGLFunction<GrGLBindFramebufferFn> fBindFramebuffer;
+ GrGLFunction<GrGLBindRenderbufferFn> fBindRenderbuffer;
+ GrGLFunction<GrGLBindSamplerFn> fBindSampler;
+ GrGLFunction<GrGLBindTextureFn> fBindTexture;
+ GrGLFunction<GrGLBindVertexArrayFn> fBindVertexArray;
+ GrGLFunction<GrGLBlendBarrierFn> fBlendBarrier;
+ GrGLFunction<GrGLBlendColorFn> fBlendColor;
+ GrGLFunction<GrGLBlendEquationFn> fBlendEquation;
+ GrGLFunction<GrGLBlendFuncFn> fBlendFunc;
+ GrGLFunction<GrGLBlitFramebufferFn> fBlitFramebuffer;
+ GrGLFunction<GrGLBufferDataFn> fBufferData;
+ GrGLFunction<GrGLBufferSubDataFn> fBufferSubData;
+ GrGLFunction<GrGLCheckFramebufferStatusFn> fCheckFramebufferStatus;
+ GrGLFunction<GrGLClearFn> fClear;
+ GrGLFunction<GrGLClearColorFn> fClearColor;
+ GrGLFunction<GrGLClearStencilFn> fClearStencil;
+ GrGLFunction<GrGLClearTexImageFn> fClearTexImage;
+ GrGLFunction<GrGLClearTexSubImageFn> fClearTexSubImage;
+ GrGLFunction<GrGLColorMaskFn> fColorMask;
+ GrGLFunction<GrGLCompileShaderFn> fCompileShader;
+ GrGLFunction<GrGLCompressedTexImage2DFn> fCompressedTexImage2D;
+ GrGLFunction<GrGLCompressedTexSubImage2DFn> fCompressedTexSubImage2D;
+ GrGLFunction<GrGLCopyBufferSubDataFn> fCopyBufferSubData;
+ GrGLFunction<GrGLCopyTexSubImage2DFn> fCopyTexSubImage2D;
+ GrGLFunction<GrGLCreateProgramFn> fCreateProgram;
+ GrGLFunction<GrGLCreateShaderFn> fCreateShader;
+ GrGLFunction<GrGLCullFaceFn> fCullFace;
+ GrGLFunction<GrGLDeleteBuffersFn> fDeleteBuffers;
+ GrGLFunction<GrGLDeleteFencesFn> fDeleteFences;
+ GrGLFunction<GrGLDeleteFramebuffersFn> fDeleteFramebuffers;
+ GrGLFunction<GrGLDeleteProgramFn> fDeleteProgram;
+ GrGLFunction<GrGLDeleteQueriesFn> fDeleteQueries;
+ GrGLFunction<GrGLDeleteRenderbuffersFn> fDeleteRenderbuffers;
+ GrGLFunction<GrGLDeleteSamplersFn> fDeleteSamplers;
+ GrGLFunction<GrGLDeleteShaderFn> fDeleteShader;
+ GrGLFunction<GrGLDeleteTexturesFn> fDeleteTextures;
+ GrGLFunction<GrGLDeleteVertexArraysFn> fDeleteVertexArrays;
+ GrGLFunction<GrGLDepthMaskFn> fDepthMask;
+ GrGLFunction<GrGLDisableFn> fDisable;
+ GrGLFunction<GrGLDisableVertexAttribArrayFn> fDisableVertexAttribArray;
+ GrGLFunction<GrGLDrawArraysFn> fDrawArrays;
+ GrGLFunction<GrGLDrawArraysIndirectFn> fDrawArraysIndirect;
+ GrGLFunction<GrGLDrawArraysInstancedFn> fDrawArraysInstanced;
+ GrGLFunction<GrGLDrawBufferFn> fDrawBuffer;
+ GrGLFunction<GrGLDrawBuffersFn> fDrawBuffers;
+ GrGLFunction<GrGLDrawElementsFn> fDrawElements;
+ GrGLFunction<GrGLDrawElementsIndirectFn> fDrawElementsIndirect;
+ GrGLFunction<GrGLDrawElementsInstancedFn> fDrawElementsInstanced;
+ GrGLFunction<GrGLDrawRangeElementsFn> fDrawRangeElements;
+ GrGLFunction<GrGLEnableFn> fEnable;
+ GrGLFunction<GrGLEnableVertexAttribArrayFn> fEnableVertexAttribArray;
+ GrGLFunction<GrGLEndQueryFn> fEndQuery;
+ GrGLFunction<GrGLFinishFn> fFinish;
+ GrGLFunction<GrGLFinishFenceFn> fFinishFence;
+ GrGLFunction<GrGLFlushFn> fFlush;
+ GrGLFunction<GrGLFlushMappedBufferRangeFn> fFlushMappedBufferRange;
+ GrGLFunction<GrGLFramebufferRenderbufferFn> fFramebufferRenderbuffer;
+ GrGLFunction<GrGLFramebufferTexture2DFn> fFramebufferTexture2D;
+ GrGLFunction<GrGLFramebufferTexture2DMultisampleFn> fFramebufferTexture2DMultisample;
+ GrGLFunction<GrGLFrontFaceFn> fFrontFace;
+ GrGLFunction<GrGLGenBuffersFn> fGenBuffers;
+ GrGLFunction<GrGLGenFencesFn> fGenFences;
+ GrGLFunction<GrGLGenFramebuffersFn> fGenFramebuffers;
+ GrGLFunction<GrGLGenerateMipmapFn> fGenerateMipmap;
+ GrGLFunction<GrGLGenQueriesFn> fGenQueries;
+ GrGLFunction<GrGLGenRenderbuffersFn> fGenRenderbuffers;
+ GrGLFunction<GrGLGenSamplersFn> fGenSamplers;
+ GrGLFunction<GrGLGenTexturesFn> fGenTextures;
+ GrGLFunction<GrGLGenVertexArraysFn> fGenVertexArrays;
+ GrGLFunction<GrGLGetBufferParameterivFn> fGetBufferParameteriv;
+ GrGLFunction<GrGLGetErrorFn> fGetError;
+ GrGLFunction<GrGLGetFramebufferAttachmentParameterivFn> fGetFramebufferAttachmentParameteriv;
+ GrGLFunction<GrGLGetFloatvFn> fGetFloatv;
+ GrGLFunction<GrGLGetIntegervFn> fGetIntegerv;
+ GrGLFunction<GrGLGetMultisamplefvFn> fGetMultisamplefv;
+ GrGLFunction<GrGLGetProgramBinaryFn> fGetProgramBinary;
+ GrGLFunction<GrGLGetProgramInfoLogFn> fGetProgramInfoLog;
+ GrGLFunction<GrGLGetProgramivFn> fGetProgramiv;
+ GrGLFunction<GrGLGetQueryObjecti64vFn> fGetQueryObjecti64v;
+ GrGLFunction<GrGLGetQueryObjectivFn> fGetQueryObjectiv;
+ GrGLFunction<GrGLGetQueryObjectui64vFn> fGetQueryObjectui64v;
+ GrGLFunction<GrGLGetQueryObjectuivFn> fGetQueryObjectuiv;
+ GrGLFunction<GrGLGetQueryivFn> fGetQueryiv;
+ GrGLFunction<GrGLGetRenderbufferParameterivFn> fGetRenderbufferParameteriv;
+ GrGLFunction<GrGLGetShaderInfoLogFn> fGetShaderInfoLog;
+ GrGLFunction<GrGLGetShaderivFn> fGetShaderiv;
+ GrGLFunction<GrGLGetShaderPrecisionFormatFn> fGetShaderPrecisionFormat;
+ GrGLFunction<GrGLGetStringFn> fGetString;
+ GrGLFunction<GrGLGetStringiFn> fGetStringi;
+ GrGLFunction<GrGLGetTexLevelParameterivFn> fGetTexLevelParameteriv;
+ GrGLFunction<GrGLGetUniformLocationFn> fGetUniformLocation;
+ GrGLFunction<GrGLInsertEventMarkerFn> fInsertEventMarker;
+ GrGLFunction<GrGLInvalidateBufferDataFn> fInvalidateBufferData;
+ GrGLFunction<GrGLInvalidateBufferSubDataFn> fInvalidateBufferSubData;
+ GrGLFunction<GrGLInvalidateFramebufferFn> fInvalidateFramebuffer;
+ GrGLFunction<GrGLInvalidateSubFramebufferFn> fInvalidateSubFramebuffer;
+ GrGLFunction<GrGLInvalidateTexImageFn> fInvalidateTexImage;
+ GrGLFunction<GrGLInvalidateTexSubImageFn> fInvalidateTexSubImage;
+ GrGLFunction<GrGLIsTextureFn> fIsTexture;
+ GrGLFunction<GrGLLineWidthFn> fLineWidth;
+ GrGLFunction<GrGLLinkProgramFn> fLinkProgram;
+ GrGLFunction<GrGLProgramBinaryFn> fProgramBinary;
+ GrGLFunction<GrGLProgramParameteriFn> fProgramParameteri;
+ GrGLFunction<GrGLMapBufferFn> fMapBuffer;
+ GrGLFunction<GrGLMapBufferRangeFn> fMapBufferRange;
+ GrGLFunction<GrGLMapBufferSubDataFn> fMapBufferSubData;
+ GrGLFunction<GrGLMapTexSubImage2DFn> fMapTexSubImage2D;
+ GrGLFunction<GrGLMemoryBarrierFn> fMemoryBarrier;
+ GrGLFunction<GrGLDrawArraysInstancedBaseInstanceFn> fDrawArraysInstancedBaseInstance;
+ GrGLFunction<GrGLDrawElementsInstancedBaseVertexBaseInstanceFn> fDrawElementsInstancedBaseVertexBaseInstance;
+ GrGLFunction<GrGLMultiDrawArraysIndirectFn> fMultiDrawArraysIndirect;
+ GrGLFunction<GrGLMultiDrawElementsIndirectFn> fMultiDrawElementsIndirect;
+ GrGLFunction<GrGLMultiDrawArraysInstancedBaseInstanceFn> fMultiDrawArraysInstancedBaseInstance;
+ GrGLFunction<GrGLMultiDrawElementsInstancedBaseVertexBaseInstanceFn> fMultiDrawElementsInstancedBaseVertexBaseInstance;
+ GrGLFunction<GrGLPatchParameteriFn> fPatchParameteri;
+ GrGLFunction<GrGLPixelStoreiFn> fPixelStorei;
+ GrGLFunction<GrGLPolygonModeFn> fPolygonMode;
+ GrGLFunction<GrGLPopGroupMarkerFn> fPopGroupMarker;
+ GrGLFunction<GrGLPushGroupMarkerFn> fPushGroupMarker;
+ GrGLFunction<GrGLQueryCounterFn> fQueryCounter;
+ GrGLFunction<GrGLReadBufferFn> fReadBuffer;
+ GrGLFunction<GrGLReadPixelsFn> fReadPixels;
+ GrGLFunction<GrGLRenderbufferStorageFn> fRenderbufferStorage;
+
+        // On OpenGL ES there are multiple incompatible extensions that add support for MSAA,
+        // and ES3 adds MSAA support to the standard. On an ES3 driver we may still use the
+        // older extensions for performance reasons or due to ES3 driver bugs. We want the
+        // function that creates the GrGLInterface to provide all available functions, and
+        // internally we will select among them. Each variation provides an entry point named
+        // glRenderbufferStorageMultisample*.
+ // So we have separate function pointers for GL_IMG/EXT_multisampled_to_texture,
+ // GL_CHROMIUM/ANGLE_framebuffer_multisample/ES3, and GL_APPLE_framebuffer_multisample
+ // variations.
+ //
+ // If a driver supports multiple GL_ARB_framebuffer_multisample-style extensions then we will
+ // assume the function pointers for the standard (or equivalent GL_ARB) version have
+ // been preferred over GL_EXT, GL_CHROMIUM, or GL_ANGLE variations that have reduced
+ // functionality.
+
+ // GL_EXT_multisampled_render_to_texture (preferred) or GL_IMG_multisampled_render_to_texture
+ GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisampleES2EXT;
+ // GL_APPLE_framebuffer_multisample
+ GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisampleES2APPLE;
+
+ // This is used to store the pointer for GL_ARB/EXT/ANGLE/CHROMIUM_framebuffer_multisample or
+ // the standard function in ES3+ or GL 3.0+.
+ GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisample;
+
+ // Pointer to BindUniformLocationCHROMIUM from the GL_CHROMIUM_bind_uniform_location extension.
+ GrGLFunction<GrGLBindUniformLocationFn> fBindUniformLocation;
+
+ GrGLFunction<GrGLResolveMultisampleFramebufferFn> fResolveMultisampleFramebuffer;
+ GrGLFunction<GrGLSamplerParameterfFn> fSamplerParameterf;
+ GrGLFunction<GrGLSamplerParameteriFn> fSamplerParameteri;
+ GrGLFunction<GrGLSamplerParameterivFn> fSamplerParameteriv;
+ GrGLFunction<GrGLScissorFn> fScissor;
+ GrGLFunction<GrGLSetFenceFn> fSetFence;
+ GrGLFunction<GrGLShaderSourceFn> fShaderSource;
+ GrGLFunction<GrGLStencilFuncFn> fStencilFunc;
+ GrGLFunction<GrGLStencilFuncSeparateFn> fStencilFuncSeparate;
+ GrGLFunction<GrGLStencilMaskFn> fStencilMask;
+ GrGLFunction<GrGLStencilMaskSeparateFn> fStencilMaskSeparate;
+ GrGLFunction<GrGLStencilOpFn> fStencilOp;
+ GrGLFunction<GrGLStencilOpSeparateFn> fStencilOpSeparate;
+ GrGLFunction<GrGLTestFenceFn> fTestFence;
+ GrGLFunction<GrGLTexBufferFn> fTexBuffer;
+ GrGLFunction<GrGLTexBufferRangeFn> fTexBufferRange;
+ GrGLFunction<GrGLTexImage2DFn> fTexImage2D;
+ GrGLFunction<GrGLTexParameterfFn> fTexParameterf;
+ GrGLFunction<GrGLTexParameterfvFn> fTexParameterfv;
+ GrGLFunction<GrGLTexParameteriFn> fTexParameteri;
+ GrGLFunction<GrGLTexParameterivFn> fTexParameteriv;
+ GrGLFunction<GrGLTexSubImage2DFn> fTexSubImage2D;
+ GrGLFunction<GrGLTexStorage2DFn> fTexStorage2D;
+ GrGLFunction<GrGLTextureBarrierFn> fTextureBarrier;
+ GrGLFunction<GrGLDiscardFramebufferFn> fDiscardFramebuffer;
+ GrGLFunction<GrGLUniform1fFn> fUniform1f;
+ GrGLFunction<GrGLUniform1iFn> fUniform1i;
+ GrGLFunction<GrGLUniform1fvFn> fUniform1fv;
+ GrGLFunction<GrGLUniform1ivFn> fUniform1iv;
+ GrGLFunction<GrGLUniform2fFn> fUniform2f;
+ GrGLFunction<GrGLUniform2iFn> fUniform2i;
+ GrGLFunction<GrGLUniform2fvFn> fUniform2fv;
+ GrGLFunction<GrGLUniform2ivFn> fUniform2iv;
+ GrGLFunction<GrGLUniform3fFn> fUniform3f;
+ GrGLFunction<GrGLUniform3iFn> fUniform3i;
+ GrGLFunction<GrGLUniform3fvFn> fUniform3fv;
+ GrGLFunction<GrGLUniform3ivFn> fUniform3iv;
+ GrGLFunction<GrGLUniform4fFn> fUniform4f;
+ GrGLFunction<GrGLUniform4iFn> fUniform4i;
+ GrGLFunction<GrGLUniform4fvFn> fUniform4fv;
+ GrGLFunction<GrGLUniform4ivFn> fUniform4iv;
+ GrGLFunction<GrGLUniformMatrix2fvFn> fUniformMatrix2fv;
+ GrGLFunction<GrGLUniformMatrix3fvFn> fUniformMatrix3fv;
+ GrGLFunction<GrGLUniformMatrix4fvFn> fUniformMatrix4fv;
+ GrGLFunction<GrGLUnmapBufferFn> fUnmapBuffer;
+ GrGLFunction<GrGLUnmapBufferSubDataFn> fUnmapBufferSubData;
+ GrGLFunction<GrGLUnmapTexSubImage2DFn> fUnmapTexSubImage2D;
+ GrGLFunction<GrGLUseProgramFn> fUseProgram;
+ GrGLFunction<GrGLVertexAttrib1fFn> fVertexAttrib1f;
+ GrGLFunction<GrGLVertexAttrib2fvFn> fVertexAttrib2fv;
+ GrGLFunction<GrGLVertexAttrib3fvFn> fVertexAttrib3fv;
+ GrGLFunction<GrGLVertexAttrib4fvFn> fVertexAttrib4fv;
+ GrGLFunction<GrGLVertexAttribDivisorFn> fVertexAttribDivisor;
+ GrGLFunction<GrGLVertexAttribIPointerFn> fVertexAttribIPointer;
+ GrGLFunction<GrGLVertexAttribPointerFn> fVertexAttribPointer;
+ GrGLFunction<GrGLViewportFn> fViewport;
+
+ /* ARB_sync */
+ GrGLFunction<GrGLFenceSyncFn> fFenceSync;
+ GrGLFunction<GrGLIsSyncFn> fIsSync;
+ GrGLFunction<GrGLClientWaitSyncFn> fClientWaitSync;
+ GrGLFunction<GrGLWaitSyncFn> fWaitSync;
+ GrGLFunction<GrGLDeleteSyncFn> fDeleteSync;
+
+        /* ARB_internalformat_query */
+ GrGLFunction<GrGLGetInternalformativFn> fGetInternalformativ;
+
+ /* KHR_debug */
+ GrGLFunction<GrGLDebugMessageControlFn> fDebugMessageControl;
+ GrGLFunction<GrGLDebugMessageInsertFn> fDebugMessageInsert;
+ GrGLFunction<GrGLDebugMessageCallbackFn> fDebugMessageCallback;
+ GrGLFunction<GrGLGetDebugMessageLogFn> fGetDebugMessageLog;
+ GrGLFunction<GrGLPushDebugGroupFn> fPushDebugGroup;
+ GrGLFunction<GrGLPopDebugGroupFn> fPopDebugGroup;
+ GrGLFunction<GrGLObjectLabelFn> fObjectLabel;
+
+ /* EXT_window_rectangles */
+ GrGLFunction<GrGLWindowRectanglesFn> fWindowRectangles;
+
+ /* GL_QCOM_tiled_rendering */
+ GrGLFunction<GrGLStartTilingFn> fStartTiling;
+ GrGLFunction<GrGLEndTilingFn> fEndTiling;
+ } fFunctions;
+
+#if GR_TEST_UTILS
+ // This exists for internal testing.
+ virtual void abandon() const;
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLTypes.h b/gfx/skia/skia/include/gpu/gl/GrGLTypes.h
new file mode 100644
index 0000000000..3af4802eaa
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLTypes.h
@@ -0,0 +1,208 @@
+
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLTypes_DEFINED
+#define GrGLTypes_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/gl/GrGLConfig.h"
+
+/**
+ * Classifies GL contexts by which standard they implement (currently as OpenGL vs. OpenGL ES).
+ */
+enum GrGLStandard {
+ kNone_GrGLStandard,
+ kGL_GrGLStandard,
+ kGLES_GrGLStandard,
+ kWebGL_GrGLStandard,
+};
+static const int kGrGLStandardCnt = 4;
+
+// The following allow certain interfaces to be turned off at compile time
+// (for example, to lower code size).
+#if SK_ASSUME_GL_ES
+ #define GR_IS_GR_GL(standard) false
+ #define GR_IS_GR_GL_ES(standard) true
+ #define GR_IS_GR_WEBGL(standard) false
+ #define SK_DISABLE_GL_INTERFACE 1
+ #define SK_DISABLE_WEBGL_INTERFACE 1
+#elif SK_ASSUME_GL
+ #define GR_IS_GR_GL(standard) true
+ #define GR_IS_GR_GL_ES(standard) false
+ #define GR_IS_GR_WEBGL(standard) false
+ #define SK_DISABLE_GL_ES_INTERFACE 1
+ #define SK_DISABLE_WEBGL_INTERFACE 1
+#elif SK_ASSUME_WEBGL
+ #define GR_IS_GR_GL(standard) false
+ #define GR_IS_GR_GL_ES(standard) false
+ #define GR_IS_GR_WEBGL(standard) true
+ #define SK_DISABLE_GL_ES_INTERFACE 1
+ #define SK_DISABLE_GL_INTERFACE 1
+#else
+ #define GR_IS_GR_GL(standard) (kGL_GrGLStandard == standard)
+ #define GR_IS_GR_GL_ES(standard) (kGLES_GrGLStandard == standard)
+ #define GR_IS_GR_WEBGL(standard) (kWebGL_GrGLStandard == standard)
+#endif
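+
+// Usage sketch: callers branch on the interface's runtime standard; when one of
+// the SK_ASSUME_* defines is set, the branch folds away at compile time
+// ('interface' is a GrGLInterface, declared in GrGLInterface.h):
+//
+//     if (GR_IS_GR_GL_ES(interface->fStandard)) {
+//         // ES-only path
+//     }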
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The supported GL formats represented as an enum. Actual support by GrContext depends on GL
+ * context version and extensions.
+ */
+enum class GrGLFormat {
+ kUnknown,
+
+ kRGBA8,
+ kR8,
+ kALPHA8,
+ kLUMINANCE8,
+ kLUMINANCE8_ALPHA8,
+ kBGRA8,
+ kRGB565,
+ kRGBA16F,
+ kR16F,
+ kRGB8,
+ kRGBX8,
+ kRG8,
+ kRGB10_A2,
+ kRGBA4,
+ kSRGB8_ALPHA8,
+ kCOMPRESSED_ETC1_RGB8,
+ kCOMPRESSED_RGB8_ETC2,
+ kCOMPRESSED_RGB8_BC1,
+ kCOMPRESSED_RGBA8_BC1,
+ kR16,
+ kRG16,
+ kRGBA16,
+ kRG16F,
+ kLUMINANCE16F,
+
+ kLastColorFormat = kLUMINANCE16F,
+
+ // Depth/Stencil formats
+ kSTENCIL_INDEX8,
+ kSTENCIL_INDEX16,
+ kDEPTH24_STENCIL8,
+
+ kLast = kDEPTH24_STENCIL8
+};
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * Declares typedefs for all the GL functions used in GrGLInterface
+ */
+
+typedef unsigned int GrGLenum;
+typedef unsigned char GrGLboolean;
+typedef unsigned int GrGLbitfield;
+typedef signed char GrGLbyte;
+typedef char GrGLchar;
+typedef short GrGLshort;
+typedef int GrGLint;
+typedef int GrGLsizei;
+typedef int64_t GrGLint64;
+typedef unsigned char GrGLubyte;
+typedef unsigned short GrGLushort;
+typedef unsigned int GrGLuint;
+typedef uint64_t GrGLuint64;
+typedef unsigned short int GrGLhalf;
+typedef float GrGLfloat;
+typedef float GrGLclampf;
+typedef double GrGLdouble;
+typedef double GrGLclampd;
+typedef void GrGLvoid;
+#ifdef _WIN64
+typedef signed long long int GrGLintptr;
+typedef signed long long int GrGLsizeiptr;
+#else
+typedef signed long int GrGLintptr;
+typedef signed long int GrGLsizeiptr;
+#endif
+typedef void* GrGLeglImage;
+typedef struct __GLsync* GrGLsync;
+
+struct GrGLDrawArraysIndirectCommand {
+ GrGLuint fCount;
+ GrGLuint fInstanceCount;
+ GrGLuint fFirst;
+ GrGLuint fBaseInstance; // Requires EXT_base_instance on ES.
+};
+
+// static_asserts must have messages in this file because it's included in C++14 client code.
+static_assert(16 == sizeof(GrGLDrawArraysIndirectCommand), "");
+
+struct GrGLDrawElementsIndirectCommand {
+ GrGLuint fCount;
+ GrGLuint fInstanceCount;
+ GrGLuint fFirstIndex;
+ GrGLuint fBaseVertex;
+ GrGLuint fBaseInstance; // Requires EXT_base_instance on ES.
+};
+
+static_assert(20 == sizeof(GrGLDrawElementsIndirectCommand), "");
+
+/**
+ * KHR_debug
+ */
+typedef void (GR_GL_FUNCTION_TYPE* GRGLDEBUGPROC)(GrGLenum source,
+ GrGLenum type,
+ GrGLuint id,
+ GrGLenum severity,
+ GrGLsizei length,
+ const GrGLchar* message,
+ const void* userParam);
+
+/**
+ * EGL types.
+ */
+typedef void* GrEGLImage;
+typedef void* GrEGLDisplay;
+typedef void* GrEGLContext;
+typedef void* GrEGLClientBuffer;
+typedef unsigned int GrEGLenum;
+typedef int32_t GrEGLint;
+typedef unsigned int GrEGLBoolean;
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * Types for interacting with GL resources created externally to Skia. GrBackendObjects for GL
+ * textures are really const GrGLTexture*. The fFormat here should be a sized, internal format
+ * for the texture. We will try to use the sized format if the GL Context supports it, otherwise
+ * we will internally fall back to using the base internal formats.
+ */
+struct GrGLTextureInfo {
+ GrGLenum fTarget;
+ GrGLuint fID;
+ GrGLenum fFormat = 0;
+
+ bool operator==(const GrGLTextureInfo& that) const {
+ return fTarget == that.fTarget && fID == that.fID && fFormat == that.fFormat;
+ }
+};
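+
+// Example of wrapping a client-created GL texture (a sketch; the hex values are
+// the core GL constants for GL_TEXTURE_2D and GL_RGBA8, and 'textureID' is a
+// texture the client created on the current GL context):
+//
+//     GrGLTextureInfo info;
+//     info.fTarget = 0x0DE1;     // GL_TEXTURE_2D
+//     info.fID     = textureID;
+//     info.fFormat = 0x8058;     // GL_RGBA8, a sized internal format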
+
+struct GrGLFramebufferInfo {
+ GrGLuint fFBOID;
+ GrGLenum fFormat = 0;
+
+ bool operator==(const GrGLFramebufferInfo& that) const {
+ return fFBOID == that.fFBOID && fFormat == that.fFormat;
+ }
+};
+
+struct GrGLSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ GrGLenum fTarget = 0;
+ GrGLenum fFormat = 0;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/egl/GrGLMakeEGLInterface.h b/gfx/skia/skia/include/gpu/gl/egl/GrGLMakeEGLInterface.h
new file mode 100644
index 0000000000..a3eb420b04
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/egl/GrGLMakeEGLInterface.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/core/SkRefCnt.h"
+
+struct GrGLInterface;
+
+sk_sp<const GrGLInterface> GrGLMakeEGLInterface();
diff --git a/gfx/skia/skia/include/gpu/gl/glx/GrGLMakeGLXInterface.h b/gfx/skia/skia/include/gpu/gl/glx/GrGLMakeGLXInterface.h
new file mode 100644
index 0000000000..b49cde4589
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/glx/GrGLMakeGLXInterface.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/core/SkRefCnt.h"
+
+struct GrGLInterface;
+
+sk_sp<const GrGLInterface> GrGLMakeGLXInterface();
diff --git a/gfx/skia/skia/include/gpu/graphite/BackendTexture.h b/gfx/skia/skia/include/gpu/graphite/BackendTexture.h
new file mode 100644
index 0000000000..2502b819a2
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/BackendTexture.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_BackendTexture_DEFINED
+#define skgpu_graphite_BackendTexture_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/graphite/TextureInfo.h"
+
+#ifdef SK_DAWN
+#include "include/gpu/graphite/dawn/DawnTypes.h"
+#endif
+
+#ifdef SK_METAL
+#include "include/gpu/graphite/mtl/MtlGraphiteTypes.h"
+#endif
+
+#ifdef SK_VULKAN
+#include "include/private/gpu/vk/SkiaVulkan.h"
+#endif
+
+namespace skgpu {
+class MutableTextureState;
+class MutableTextureStateRef;
+}
+
+namespace skgpu::graphite {
+
+class BackendTexture {
+public:
+ BackendTexture();
+#ifdef SK_DAWN
+ // Create a BackendTexture from a wgpu::Texture. Texture info will be
+    // queried from the texture. Compared to a wgpu::TextureView,
+    // SkImage::readPixels(), SkSurface::readPixels(), and
+    // SkSurface::writePixels() are implemented via direct buffer copies and
+    // should be more efficient. For a wgpu::TextureView, those methods must
+    // create an intermediate wgpu::Texture and use it to transfer pixels.
+    // Note: for better performance, using wgpu::Texture IS RECOMMENDED.
+ BackendTexture(wgpu::Texture texture);
+ // Create a BackendTexture from a wgpu::TextureView. Texture dimensions and
+ // info have to be provided.
+ // Note: this method is for importing wgpu::TextureView from wgpu::SwapChain
+ // only.
+ BackendTexture(SkISize dimensions,
+ const DawnTextureInfo& info,
+ wgpu::TextureView textureView);
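+    // Sketch of the two wrapping modes (Dawn-only; 'swapChain' is a client
+    // object, and 'dims'/'dawnInfo' must describe the view being wrapped):
+    //
+    //     BackendTexture a(myWgpuTexture);                                   // preferred
+    //     BackendTexture b(dims, dawnInfo, swapChain.GetCurrentTextureView());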
+#endif
+#ifdef SK_METAL
+ // The BackendTexture will not call retain or release on the passed in MtlHandle. Thus the
+ // client must keep the MtlHandle valid until they are no longer using the BackendTexture.
+ BackendTexture(SkISize dimensions, MtlHandle mtlTexture);
+#endif
+
+#ifdef SK_VULKAN
+ BackendTexture(SkISize dimensions,
+ const VulkanTextureInfo&,
+ VkImageLayout,
+ uint32_t queueFamilyIndex,
+ VkImage);
+#endif
+
+ BackendTexture(const BackendTexture&);
+
+ ~BackendTexture();
+
+ BackendTexture& operator=(const BackendTexture&);
+
+ bool operator==(const BackendTexture&) const;
+ bool operator!=(const BackendTexture& that) const { return !(*this == that); }
+
+ bool isValid() const { return fInfo.isValid(); }
+ BackendApi backend() const { return fInfo.backend(); }
+
+ SkISize dimensions() const { return fDimensions; }
+
+ const TextureInfo& info() const { return fInfo; }
+
+    // If the client changes any of the mutable backend state of the BackendTexture, they should
+    // call this function to inform Skia that those values have changed. The backend-API-specific
+    // state that can be set from this function is:
+ //
+ // Vulkan: VkImageLayout and QueueFamilyIndex
+ void setMutableState(const skgpu::MutableTextureState&);
+
+#ifdef SK_DAWN
+ wgpu::Texture getDawnTexture() const;
+ wgpu::TextureView getDawnTextureView() const;
+#endif
+#ifdef SK_METAL
+ MtlHandle getMtlTexture() const;
+#endif
+
+#ifdef SK_VULKAN
+ VkImage getVkImage() const;
+ VkImageLayout getVkImageLayout() const;
+ uint32_t getVkQueueFamilyIndex() const;
+#endif
+
+private:
+ sk_sp<MutableTextureStateRef> mutableState() const;
+
+ SkISize fDimensions;
+ TextureInfo fInfo;
+
+ sk_sp<MutableTextureStateRef> fMutableState;
+
+#ifdef SK_DAWN
+ struct Dawn {
+ Dawn(wgpu::Texture texture) : fTexture(std::move(texture)) {}
+ Dawn(wgpu::TextureView textureView) : fTextureView(std::move(textureView)) {}
+
+ bool operator==(const Dawn& that) const {
+ return fTexture.Get() == that.fTexture.Get() &&
+ fTextureView.Get() == that.fTextureView.Get();
+ }
+ bool operator!=(const Dawn& that) const {
+ return !this->operator==(that);
+ }
+ Dawn& operator=(const Dawn& that) {
+ fTexture = that.fTexture;
+ fTextureView = that.fTextureView;
+ return *this;
+ }
+
+ wgpu::Texture fTexture;
+ wgpu::TextureView fTextureView;
+ };
+#endif
+
+ union {
+#ifdef SK_DAWN
+ Dawn fDawn;
+#endif
+#ifdef SK_METAL
+ MtlHandle fMtlTexture;
+#endif
+#ifdef SK_VULKAN
+ VkImage fVkImage;
+#endif
+ };
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_BackendTexture_DEFINED
+
diff --git a/gfx/skia/skia/include/gpu/graphite/Context.h b/gfx/skia/skia/include/gpu/graphite/Context.h
new file mode 100644
index 0000000000..d6da45ad4c
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/Context.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_Context_DEFINED
+#define skgpu_graphite_Context_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkShader.h"
+#include "include/gpu/graphite/ContextOptions.h"
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/graphite/Recorder.h"
+#include "include/private/base/SingleOwner.h"
+
+#include <memory>
+
+class SkRuntimeEffect;
+
+namespace skgpu::graphite {
+
+class BackendTexture;
+class Buffer;
+class ClientMappedBufferManager;
+class Context;
+class ContextPriv;
+class GlobalCache;
+class PaintOptions;
+class PlotUploadTracker;
+class QueueManager;
+class Recording;
+class ResourceProvider;
+class SharedContext;
+class TextureProxy;
+
+class SK_API Context final {
+public:
+ Context(const Context&) = delete;
+ Context(Context&&) = delete;
+ Context& operator=(const Context&) = delete;
+ Context& operator=(Context&&) = delete;
+
+ ~Context();
+
+ BackendApi backend() const;
+
+ std::unique_ptr<Recorder> makeRecorder(const RecorderOptions& = {});
+
+ bool insertRecording(const InsertRecordingInfo&);
+ bool submit(SyncToCpu = SyncToCpu::kNo);
+
+ void asyncReadPixels(const SkImage* image,
+ const SkColorInfo& dstColorInfo,
+ const SkIRect& srcRect,
+ SkImage::ReadPixelsCallback callback,
+ SkImage::ReadPixelsContext context);
+
+ void asyncReadPixels(const SkSurface* surface,
+ const SkColorInfo& dstColorInfo,
+ const SkIRect& srcRect,
+ SkImage::ReadPixelsCallback callback,
+ SkImage::ReadPixelsContext context);
+
+ /**
+ * Checks whether any asynchronous work is complete and if so calls related callbacks.
+ */
+ void checkAsyncWorkCompletion();
+
+ /**
+ * Called to delete the passed in BackendTexture. This should only be called if the
+ * BackendTexture was created by calling Recorder::createBackendTexture on a Recorder created
+ * from this Context. If the BackendTexture is not valid or does not match the BackendApi of the
+ * Context then nothing happens.
+ *
+ * Otherwise this will delete/release the backend object that is wrapped in the BackendTexture.
+ * The BackendTexture will be reset to an invalid state and should not be used again.
+ */
+ void deleteBackendTexture(BackendTexture&);
+
+ // Provides access to functions that aren't part of the public API.
+ ContextPriv priv();
+ const ContextPriv priv() const; // NOLINT(readability-const-return-type)
+
+ class ContextID {
+ public:
+ static Context::ContextID Next();
+
+ ContextID() : fID(SK_InvalidUniqueID) {}
+
+ bool operator==(const ContextID& that) const { return fID == that.fID; }
+ bool operator!=(const ContextID& that) const { return !(*this == that); }
+
+ void makeInvalid() { fID = SK_InvalidUniqueID; }
+ bool isValid() const { return fID != SK_InvalidUniqueID; }
+
+ private:
+ constexpr ContextID(uint32_t id) : fID(id) {}
+ uint32_t fID;
+ };
+
+ ContextID contextID() const { return fContextID; }
+
+protected:
+ Context(sk_sp<SharedContext>, std::unique_ptr<QueueManager>, const ContextOptions&);
+
+private:
+ friend class ContextPriv;
+ friend class ContextCtorAccessor;
+
+ SingleOwner* singleOwner() const { return &fSingleOwner; }
+
+ // Must be called in Make() to handle one-time GPU setup operations that can possibly fail and
+ // require Context::Make() to return a nullptr.
+ bool finishInitialization();
+
+ void asyncReadPixels(const TextureProxy* textureProxy,
+ const SkImageInfo& srcImageInfo,
+ const SkColorInfo& dstColorInfo,
+ const SkIRect& srcRect,
+ SkImage::ReadPixelsCallback callback,
+ SkImage::ReadPixelsContext context);
+
+    // Inserts a texture-to-buffer transfer task; used by the asyncReadPixels methods.
+ struct PixelTransferResult {
+ using ConversionFn = void(void* dst, const void* mappedBuffer);
+ // If null then the transfer could not be performed. Otherwise this buffer will contain
+ // the pixel data when the transfer is complete.
+ sk_sp<Buffer> fTransferBuffer;
+ // If this is null then the transfer buffer will contain the data in the requested
+ // color type. Otherwise, when the transfer is done this must be called to convert
+ // from the transfer buffer's color type to the requested color type.
+ std::function<ConversionFn> fPixelConverter;
+ };
+ PixelTransferResult transferPixels(const TextureProxy*,
+ const SkImageInfo& srcImageInfo,
+ const SkColorInfo& dstColorInfo,
+ const SkIRect& srcRect);
+
+ sk_sp<SharedContext> fSharedContext;
+ std::unique_ptr<ResourceProvider> fResourceProvider;
+ std::unique_ptr<QueueManager> fQueueManager;
+ std::unique_ptr<ClientMappedBufferManager> fMappedBufferManager;
+ std::unique_ptr<PlotUploadTracker> fPlotUploadTracker;
+
+ // In debug builds we guard against improper thread handling. This guard is passed to the
+ // ResourceCache for the Context.
+ mutable SingleOwner fSingleOwner;
+
+#if GRAPHITE_TEST_UTILS
+ // In test builds a Recorder may track the Context that was used to create it.
+ bool fStoreContextRefInRecorder = false;
+ // If this tracking is on, to allow the client to safely delete this Context or its Recorders
+ // in any order we must also track the Recorders created here.
+ std::vector<Recorder*> fTrackedRecorders;
+#endif
+
+ // Needed for MessageBox handling
+ const ContextID fContextID;
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_Context_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/ContextOptions.h b/gfx/skia/skia/include/gpu/graphite/ContextOptions.h
new file mode 100644
index 0000000000..2838f10b0d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/ContextOptions.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_ContextOptions_DEFINED
+#define skgpu_graphite_ContextOptions_DEFINED
+
+namespace skgpu { class ShaderErrorHandler; }
+
+namespace skgpu::graphite {
+
+struct SK_API ContextOptions {
+ ContextOptions() {}
+
+ /**
+ * Disables correctness workarounds that are enabled for particular GPUs, OSes, or drivers.
+     * This does not affect code path choices that are made for performance reasons, nor does it
+ * override other ContextOption settings.
+ */
+ bool fDisableDriverCorrectnessWorkarounds = false;
+
+ /**
+ * If present, use this object to report shader compilation failures. If not, report failures
+ * via SkDebugf and assert.
+ */
+ skgpu::ShaderErrorHandler* fShaderErrorHandler = nullptr;
+
+ /**
+     * Indicates whether the client guarantees that only one thread at a time will use the Context
+     * and all derived objects (e.g. Recorders, Recordings, etc.). If so, we can make some objects
+     * (e.g. VulkanMemoryAllocator) non-thread-safe to improve single-threaded performance.
+ */
+ bool fClientWillExternallySynchronizeAllThreads = false;
+
+ /**
+ * The maximum size of cache textures used for Skia's Glyph cache.
+ */
+ size_t fGlyphCacheTextureMaximumBytes = 2048 * 1024 * 4;
+
+ /**
+     * Below this threshold size in device space, distance field fonts won't be used. Distance
+     * field fonts don't support hinting, which is more important at smaller sizes.
+ */
+ float fMinDistanceFieldFontSize = 18;
+
+ /**
+ * Above this threshold size in device space glyphs are drawn as individual paths.
+ */
+#if defined(SK_BUILD_FOR_ANDROID)
+ float fGlyphsAsPathsFontSize = 384;
+#elif defined(SK_BUILD_FOR_MAC)
+ float fGlyphsAsPathsFontSize = 256;
+#else
+ float fGlyphsAsPathsFontSize = 324;
+#endif
+
+ /**
+     * Whether the glyph atlas may use multiple textures. If allowed, each texture's size is
+     * bounded by fGlyphCacheTextureMaximumBytes.
+ */
+ bool fAllowMultipleGlyphCacheTextures = true;
+ bool fSupportBilerpFromGlyphAtlas = false;
+
+#if GRAPHITE_TEST_UTILS
+ /**
+ * Private options that are only meant for testing within Skia's tools.
+ */
+
+ /**
+ * Maximum width and height of internal texture atlases.
+ */
+ int fMaxTextureAtlasSize = 2048;
+
+ /**
+ * If true, will store a pointer in Recorder that points back to the Context
+ * that created it. Used by readPixels() and other methods that normally require a Context.
+ */
+ bool fStoreContextRefInRecorder = false;
+#endif
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_ContextOptions_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/GraphiteTypes.h b/gfx/skia/skia/include/gpu/graphite/GraphiteTypes.h
new file mode 100644
index 0000000000..231f2a5e14
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/GraphiteTypes.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_GraphiteTypes_DEFINED
+#define skgpu_graphite_GraphiteTypes_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "include/gpu/GpuTypes.h"
+
+#include <memory>
+
+class SkSurface;
+
+namespace skgpu::graphite {
+
+class Recording;
+class Task;
+
+using GpuFinishedContext = void*;
+using GpuFinishedProc = void (*)(GpuFinishedContext finishedContext, CallbackResult);
+
+/**
+ * The fFinishedProc is called when the Recording has been submitted and finished on the GPU, or
+ * when there is a failure that caused it not to be submitted. The callback will always be called
+ * and the caller can use the callback to know it is safe to free any resources associated with
+ * the Recording that they may be holding onto. If the Recording is successfully submitted to the
+ * GPU, the callback will be called with CallbackResult::kSuccess once the GPU has finished. In
+ * all other cases, where some failure occurred, it will be called with CallbackResult::kFailed.
+ *
+ * The fTargetSurface, if provided, is used as a target for any draws recorded onto a deferred
+ * canvas returned from Recorder::makeDeferredCanvas. This target surface must be provided iff
+ * the Recording contains any such draws. It must be Graphite-backed and its backing texture's
+ * TextureInfo must match the info provided to the Recorder when making the deferred canvas.
+ *
+ * fTargetTranslation is an additional translation applied to draws targeting fTargetSurface.
+ */
+struct InsertRecordingInfo {
+ Recording* fRecording = nullptr;
+
+ SkSurface* fTargetSurface = nullptr;
+ SkIVector fTargetTranslation = {0, 0};
+
+ GpuFinishedContext fFinishedContext = nullptr;
+ GpuFinishedProc fFinishedProc = nullptr;
+};
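+
+// A usage sketch (client code; 'recording', 'context', and 'myState' are
+// assumed to exist):
+//
+//     InsertRecordingInfo info;
+//     info.fRecording = recording.get();
+//     info.fFinishedContext = myState;
+//     info.fFinishedProc = [](GpuFinishedContext c, CallbackResult r) {
+//         // Safe to free resources held for this Recording.
+//     };
+//     context->insertRecording(info);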
+
+/**
+ * The fFinishedProc is called when the Recording has been submitted and finished on the GPU, or
+ * when there is a failure that caused it not to be submitted. The callback will always be called
+ * and the caller can use the callback to know it is safe to free any resources associated with
+ * the Recording that they may be holding onto. If the Recording is successfully submitted to the
+ * GPU, the callback will be called with CallbackResult::kSuccess once the GPU has finished. In
+ * all other cases, where some failure occurred, it will be called with CallbackResult::kFailed.
+ */
+struct InsertFinishInfo {
+ GpuFinishedContext fFinishedContext = nullptr;
+ GpuFinishedProc fFinishedProc = nullptr;
+};
+
+/**
+ * Actually submit work to the GPU and track its completion
+ */
+enum class SyncToCpu : bool {
+ kYes = true,
+ kNo = false
+};
+
+/*
+ * For Promise Images - should the Promise Image be fulfilled every time a Recording that references
+ * it is inserted into the Context.
+ */
+enum class Volatile : bool {
+ kNo = false, // only fulfilled once
+ kYes = true // fulfilled on every insertion call
+};
+
+/*
+ * Graphite's different rendering methods each only apply to certain types of draws. This
+ * enum supports decision-making regarding the different renderers and what is being drawn.
+ */
+enum DrawTypeFlags : uint8_t {
+
+ kNone = 0b000,
+
+ // SkCanvas:: drawSimpleText, drawString, drawGlyphs, drawTextBlob, drawSlug
+ kText = 0b001,
+
+ // SkCanvas::drawVertices
+ kDrawVertices = 0b010,
+
+ // All other canvas draw calls
+ kShape = 0b100,
+
+ kMostCommon = kText | kShape,
+ kAll = kText | kDrawVertices | kShape
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_GraphiteTypes_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/ImageProvider.h b/gfx/skia/skia/include/gpu/graphite/ImageProvider.h
new file mode 100644
index 0000000000..2773f03b1d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/ImageProvider.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_ImageProvider_DEFINED
+#define skgpu_graphite_ImageProvider_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkRefCnt.h"
+
+namespace skgpu::graphite {
+
+class Recorder;
+
+/*
+ * This class provides a centralized location for clients to perform any caching of images
+ * they desire. Whenever Graphite encounters an SkImage which is not Graphite-backed
+ * it will call ImageProvider::findOrCreate. The client's derived version of this class should
+ * return a Graphite-backed version of the provided SkImage that meets the specified
+ * requirements.
+ *
+ * Skia requires that 'findOrCreate' return a Graphite-backed image that preserves the dimensions,
+ * number of channels and alpha type of the original image. The bit depth of the
+ * individual channels can change (e.g., 4444 -> 8888 is allowed).
+ * Wrt mipmapping, the returned image can have different mipmap settings than requested. If
+ * mipmapping was requested but not returned, the sampling level will be reduced to linear.
+ * If the requirements are not met by the returned image (modulo the flexibility wrt mipmapping),
+ * Graphite will drop the draw.
+ *
+ * Note: by default, Graphite will not perform any caching of images
+ *
+ * Threading concerns:
+ * If the same ImageProvider is given to multiple Recorders it is up to the
+ * client to handle any required thread synchronization. This is not limited to just
+ * restricting access to whatever map a derived class may have but extends to ensuring
+ * that an image created on one Recorder has had its creation work submitted before it
+ * is used by any work submitted by another Recording. Please note, this requirement
+ * (re the submission of creation work and image usage on different threads) is common to all
+ * graphite SkImages and isn't unique to SkImages returned by the ImageProvider.
+ *
+ * TODO(b/240996632): add documentation re shutdown order.
+ * TODO(b/240997067): add unit tests
+ */
+class SK_API ImageProvider : public SkRefCnt {
+public:
+ // If the client's derived class already has a Graphite-backed image that has the same
+ // contents as 'image' and meets the requirements, then it can be returned.
+ // makeTextureImage can always be called to create an acceptable Graphite-backed image
+ // which could then be cached.
+ virtual sk_sp<SkImage> findOrCreate(Recorder* recorder,
+ const SkImage* image,
+ SkImage::RequiredImageProperties) = 0;
+};
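+
+// A minimal caching provider sketch (assumptions: keying on SkImage::uniqueID()
+// and using SkImage::makeTextureImage as suggested above; synchronization and
+// eviction are omitted):
+//
+//     class CachingImageProvider final : public skgpu::graphite::ImageProvider {
+//     public:
+//         sk_sp<SkImage> findOrCreate(Recorder* recorder, const SkImage* image,
+//                                     SkImage::RequiredImageProperties props) override {
+//             sk_sp<SkImage>& cached = fCache[image->uniqueID()];
+//             if (!cached) {
+//                 cached = image->makeTextureImage(recorder, props);
+//             }
+//             return cached;
+//         }
+//     private:
+//         std::unordered_map<uint32_t, sk_sp<SkImage>> fCache;
+//     };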
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_ImageProvider_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/Recorder.h b/gfx/skia/skia/include/gpu/graphite/Recorder.h
new file mode 100644
index 0000000000..b27f682d2d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/Recorder.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_Recorder_DEFINED
+#define skgpu_graphite_Recorder_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/graphite/Recording.h"
+#include "include/private/base/SingleOwner.h"
+#include "include/private/base/SkTArray.h"
+
+#include <vector>
+
+class SkCanvas;
+struct SkImageInfo;
+class SkPixmap;
+
+namespace skgpu {
+class RefCntedCallback;
+class TokenTracker;
+}
+
+namespace sktext::gpu {
+class StrikeCache;
+class TextBlobRedrawCoordinator;
+}
+
+namespace skgpu::graphite {
+
+class AtlasManager;
+class BackendTexture;
+class Caps;
+class Context;
+class Device;
+class DrawBufferManager;
+class GlobalCache;
+class ImageProvider;
+class RecorderPriv;
+class ResourceProvider;
+class RuntimeEffectDictionary;
+class SharedContext;
+class Task;
+class TaskGraph;
+class TextureDataBlock;
+class TextureInfo;
+class UniformDataBlock;
+class UploadBufferManager;
+
+template<typename T> class PipelineDataCache;
+using UniformDataCache = PipelineDataCache<UniformDataBlock>;
+using TextureDataCache = PipelineDataCache<TextureDataBlock>;
+
+struct SK_API RecorderOptions final {
+ RecorderOptions();
+ RecorderOptions(const RecorderOptions&);
+ ~RecorderOptions();
+
+ sk_sp<ImageProvider> fImageProvider;
+};
+
+class SK_API Recorder final {
+public:
+ Recorder(const Recorder&) = delete;
+ Recorder(Recorder&&) = delete;
+ Recorder& operator=(const Recorder&) = delete;
+ Recorder& operator=(Recorder&&) = delete;
+
+ ~Recorder();
+
+ std::unique_ptr<Recording> snap();
+
+ ImageProvider* clientImageProvider() { return fClientImageProvider.get(); }
+ const ImageProvider* clientImageProvider() const { return fClientImageProvider.get(); }
+
+ /**
+ * Creates a new backend gpu texture matching the dimensions and TextureInfo. If an invalid
+ * TextureInfo or a TextureInfo Skia can't support is passed in, this will return an invalid
+ * BackendTexture. Thus the client should check isValid on the returned BackendTexture to know
+ * if it succeeded or not.
+ *
+ * If this does return a valid BackendTexture, the caller is required to use
+ * Recorder::deleteBackendTexture or Context::deleteBackendTexture to delete the texture. It is
+ * safe to use the Context that created this Recorder or any other Recorder created from the
+ * same Context to call deleteBackendTexture.
+ */
+ BackendTexture createBackendTexture(SkISize dimensions, const TextureInfo&);
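+
+    // A sketch of the create/delete flow (TextureInfo construction is
+    // backend-specific; see TextureInfo.h):
+    //
+    //     BackendTexture tex = recorder->createBackendTexture({256, 256}, texInfo);
+    //     if (tex.isValid()) {
+    //         // ... use it, then release via either the Recorder or the Context:
+    //         recorder->deleteBackendTexture(tex);
+    //     }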
+
+ /**
+ * If possible, updates a backend texture with the provided pixmap data. The client
+ * should check the return value to see if the update was successful. The client is required
+ * to insert a Recording into the Context and call `submit` to send the upload work to the gpu.
+ * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case,
+ * means that the backend format is compatible with the base pixmap's colortype. The src data
+ * can be deleted when this call returns.
+ * If the backend texture is mip mapped, the data for all the mipmap levels must be provided.
+ * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
+ * Additionally, all the miplevels must be sized correctly (please see
+ * SkMipmap::ComputeLevelSize and ComputeLevelCount).
+ * Note: the pixmap's alphatypes and colorspaces are ignored.
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateBackendTexture(const BackendTexture&,
+ const SkPixmap srcData[],
+ int numLevels);
+
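+    // Upload sketch for a mipmapped texture (level count and sizes follow
+    // SkMipmap::ComputeLevelCount/ComputeLevelSize; the pixmaps stay client-owned):
+    //
+    //     SkPixmap levels[kNumLevels];  // base level first, dimensions halving per level
+    //     bool ok = recorder->updateBackendTexture(tex, levels, kNumLevels);
+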
+ /**
+ * Called to delete the passed in BackendTexture. This should only be called if the
+ * BackendTexture was created by calling Recorder::createBackendTexture on a Recorder that is
+ * associated with the same Context. If the BackendTexture is not valid or does not match the
+ * BackendApi of the Recorder then nothing happens.
+ *
+ * Otherwise this will delete/release the backend object that is wrapped in the BackendTexture.
+ * The BackendTexture will be reset to an invalid state and should not be used again.
+ */
+ void deleteBackendTexture(BackendTexture&);
+
+ // Adds a proc that will be moved to the Recording upon snap, subsequently attached to the
+ // CommandBuffer when the Recording is added, and called when that CommandBuffer is submitted
+ // and finishes. If the Recorder or Recording is deleted before the proc is added to the
+ // CommandBuffer, it will be called with result Failure.
+ void addFinishInfo(const InsertFinishInfo&);
+
+ // Returns a canvas that will record to a proxy surface, which must be instantiated on replay.
+ // This can only be called once per Recording; subsequent calls will return null until a
+ // Recording is snapped. Additionally, the returned SkCanvas is only valid until the next
+ // Recording snap, at which point it is deleted.
+ SkCanvas* makeDeferredCanvas(const SkImageInfo&, const TextureInfo&);
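+
+    // Deferred-canvas sketch (the replay target is supplied later via
+    // InsertRecordingInfo::fTargetSurface; 'paint' is illustrative):
+    //
+    //     SkCanvas* canvas = recorder->makeDeferredCanvas(imageInfo, textureInfo);
+    //     canvas->drawPaint(paint);
+    //     std::unique_ptr<Recording> recording = recorder->snap();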
+
+ // Provides access to functions that aren't part of the public API.
+ RecorderPriv priv();
+ const RecorderPriv priv() const; // NOLINT(readability-const-return-type)
+
+#if GR_TEST_UTILS
+ bool deviceIsRegistered(Device*);
+#endif
+
+private:
+ friend class Context; // For ctor
+    friend class Device; // For registering and deregistering Devices
+ friend class RecorderPriv; // for ctor and hidden methods
+
+ Recorder(sk_sp<SharedContext>, const RecorderOptions&);
+
+ SingleOwner* singleOwner() const { return &fSingleOwner; }
+
+ BackendApi backend() const;
+
+ // We keep track of all Devices that are connected to a Recorder. This allows the client to
+ // safely delete an SkSurface or a Recorder in any order. If the client deletes the Recorder
+ // we need to notify all Devices that the Recorder is no longer valid. If we delete the
+ // SkSurface/Device first we will flush all the Device's into the Recorder before deregistering
+ // it from the Recorder.
+ //
+ // We do not need to take a ref on the Device since the Device will flush and deregister itself
+ // in its dtor. There is no other need for the Recorder to know about the Device after this
+ // point.
+ //
+ // Note: We could probably get by with only registering Devices directly connected to
+ // SkSurfaces. All other one off Devices will be created in a controlled scope where the
+ // Recorder should still be valid by the time they need to flush their work when the Device is
+ // deleted. We would have to make sure we safely handle cases where a client calls saveLayer
+ // then either deletes the SkSurface or Recorder before calling restore. For simplicity we just
+ // register every device for now, but if we see extra overhead in pushing back the extra
+ // pointers, we can look into only registering SkSurface Devices.
+ void registerDevice(Device*);
+ void deregisterDevice(const Device*);
+
+ sk_sp<SharedContext> fSharedContext;
+ std::unique_ptr<ResourceProvider> fResourceProvider;
+ std::unique_ptr<RuntimeEffectDictionary> fRuntimeEffectDict;
+
+ std::unique_ptr<TaskGraph> fGraph;
+ std::unique_ptr<UniformDataCache> fUniformDataCache;
+ std::unique_ptr<TextureDataCache> fTextureDataCache;
+ std::unique_ptr<DrawBufferManager> fDrawBufferManager;
+ std::unique_ptr<UploadBufferManager> fUploadBufferManager;
+ std::vector<Device*> fTrackedDevices;
+
+ uint32_t fRecorderID; // Needed for MessageBox handling for text
+ std::unique_ptr<AtlasManager> fAtlasManager;
+ std::unique_ptr<TokenTracker> fTokenTracker;
+ std::unique_ptr<sktext::gpu::StrikeCache> fStrikeCache;
+ std::unique_ptr<sktext::gpu::TextBlobRedrawCoordinator> fTextBlobCache;
+ sk_sp<ImageProvider> fClientImageProvider;
+
+ // In debug builds we guard against improper thread handling
+ // This guard is passed to the ResourceCache.
+ // TODO: Should we also pass this to Device, DrawContext, and similar classes?
+ mutable SingleOwner fSingleOwner;
+
+ sk_sp<Device> fTargetProxyDevice;
+ std::unique_ptr<SkCanvas> fTargetProxyCanvas;
+ std::unique_ptr<Recording::LazyProxyData> fTargetProxyData;
+
+ SkTArray<sk_sp<RefCntedCallback>> fFinishedProcs;
+
+#if GRAPHITE_TEST_UTILS
+ // For testing use only -- the Context used to create this Recorder
+ Context* fContext = nullptr;
+#endif
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_Recorder_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/Recording.h b/gfx/skia/skia/include/gpu/graphite/Recording.h
new file mode 100644
index 0000000000..6a94ab84b8
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/Recording.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_Recording_DEFINED
+#define skgpu_graphite_Recording_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkChecksum.h"
+#include "include/private/base/SkTArray.h"
+
+#include <memory>
+#include <unordered_set>
+#include <vector>
+
+namespace skgpu {
+class RefCntedCallback;
+}
+
+namespace skgpu::graphite {
+
+class CommandBuffer;
+class RecordingPriv;
+class Resource;
+class ResourceProvider;
+class TaskGraph;
+class Texture;
+class TextureInfo;
+class TextureProxy;
+
+class Recording final {
+public:
+ ~Recording();
+
+ RecordingPriv priv();
+
+#if GRAPHITE_TEST_UTILS
+ bool isTargetProxyInstantiated() const;
+#endif
+
+private:
+ friend class Recorder; // for ctor and LazyProxyData
+ friend class RecordingPriv;
+
+ // LazyProxyData is used if this recording should be replayed to a target that is provided on
+ // replay, and it handles the target proxy's instantiation with the provided target.
+ class LazyProxyData {
+ public:
+ LazyProxyData(const TextureInfo&);
+
+ TextureProxy* lazyProxy();
+ sk_sp<TextureProxy> refLazyProxy();
+
+ bool lazyInstantiate(ResourceProvider*, sk_sp<Texture>);
+
+ private:
+ sk_sp<Texture> fTarget;
+ sk_sp<TextureProxy> fTargetProxy;
+ };
+
+ struct ProxyHash {
+ std::size_t operator()(const sk_sp<TextureProxy>& proxy) const {
+ return SkGoodHash()(proxy.get());
+ }
+ };
+
+ Recording(std::unique_ptr<TaskGraph>,
+ std::unordered_set<sk_sp<TextureProxy>, ProxyHash>&& nonVolatileLazyProxies,
+ std::unordered_set<sk_sp<TextureProxy>, ProxyHash>&& volatileLazyProxies,
+ std::unique_ptr<LazyProxyData> targetProxyData,
+ SkTArray<sk_sp<RefCntedCallback>>&& finishedProcs);
+
+ bool addCommands(CommandBuffer*, ResourceProvider*);
+ void addResourceRef(sk_sp<Resource>);
+
+ std::unique_ptr<TaskGraph> fGraph;
+ // We don't always take refs to all resources used by specific Tasks (e.g. a common buffer used
+ // for uploads). Instead we'll just hold onto one ref for those Resources outside the Tasks.
+ // Those refs are stored in the array here and will eventually be passed onto a CommandBuffer
+ // when the Recording adds its commands.
+ std::vector<sk_sp<Resource>> fExtraResourceRefs;
+
+ std::unordered_set<sk_sp<TextureProxy>, ProxyHash> fNonVolatileLazyProxies;
+ std::unordered_set<sk_sp<TextureProxy>, ProxyHash> fVolatileLazyProxies;
+
+ std::unique_ptr<LazyProxyData> fTargetProxyData;
+
+ SkTArray<sk_sp<RefCntedCallback>> fFinishedProcs;
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_Recording_DEFINED
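+
+// Illustrative sketch (not part of the header): the usual Recording lifecycle. Recorder::snap(),
+// Context::insertRecording(), Context::submit(), and InsertRecordingInfo are assumed from the
+// accompanying Recorder.h, Context.h, and GraphiteTypes.h headers.
+//
+//   std::unique_ptr<skgpu::graphite::Recording> recording = recorder->snap();
+//   if (recording) {
+//       skgpu::graphite::InsertRecordingInfo info;
+//       info.fRecording = recording.get();
+//       // For a recording drawn to a deferred target, LazyProxyData is what allows the real
+//       // surface to be supplied only here, at replay time:
+//       // info.fTargetSurface = surface.get();
+//       context->insertRecording(info);
+//       context->submit();
+//   }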
diff --git a/gfx/skia/skia/include/gpu/graphite/TextureInfo.h b/gfx/skia/skia/include/gpu/graphite/TextureInfo.h
new file mode 100644
index 0000000000..dd4e6698c3
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/TextureInfo.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_TextureInfo_DEFINED
+#define skgpu_graphite_TextureInfo_DEFINED
+
+#include "include/gpu/graphite/GraphiteTypes.h"
+
+#ifdef SK_DAWN
+#include "include/private/gpu/graphite/DawnTypesPriv.h"
+#endif
+
+#ifdef SK_METAL
+#include "include/private/gpu/graphite/MtlGraphiteTypesPriv.h"
+#endif
+
+#ifdef SK_VULKAN
+#include "include/private/gpu/graphite/VulkanGraphiteTypesPriv.h"
+#endif
+
+namespace skgpu::graphite {
+
+class TextureInfo {
+public:
+ TextureInfo() {}
+#ifdef SK_DAWN
+ TextureInfo(const DawnTextureInfo& dawnInfo)
+ : fBackend(BackendApi::kDawn)
+ , fValid(true)
+ , fSampleCount(dawnInfo.fSampleCount)
+ , fMipmapped(dawnInfo.fMipmapped)
+ , fProtected(Protected::kNo)
+ , fDawnSpec(dawnInfo) {}
+#endif
+
+#ifdef SK_METAL
+ TextureInfo(const MtlTextureInfo& mtlInfo)
+ : fBackend(BackendApi::kMetal)
+ , fValid(true)
+ , fSampleCount(mtlInfo.fSampleCount)
+ , fMipmapped(mtlInfo.fMipmapped)
+ , fProtected(Protected::kNo)
+ , fMtlSpec(mtlInfo) {}
+#endif
+
+#ifdef SK_VULKAN
+ TextureInfo(const VulkanTextureInfo& vkInfo)
+ : fBackend(BackendApi::kVulkan)
+ , fValid(true)
+ , fSampleCount(vkInfo.fSampleCount)
+ , fMipmapped(vkInfo.fMipmapped)
+ , fProtected(Protected::kNo)
+ , fVkSpec(vkInfo) {
+ if (vkInfo.fFlags & VK_IMAGE_CREATE_PROTECTED_BIT) {
+ fProtected = Protected::kYes;
+ }
+ }
+#endif
+
+ ~TextureInfo() {}
+ TextureInfo(const TextureInfo&) = default;
+ TextureInfo& operator=(const TextureInfo&);
+
+ bool operator==(const TextureInfo&) const;
+ bool operator!=(const TextureInfo& that) const { return !(*this == that); }
+
+ bool isValid() const { return fValid; }
+ BackendApi backend() const { return fBackend; }
+
+ uint32_t numSamples() const { return fSampleCount; }
+ Mipmapped mipmapped() const { return fMipmapped; }
+ Protected isProtected() const { return fProtected; }
+
+#ifdef SK_DAWN
+ bool getDawnTextureInfo(DawnTextureInfo* info) const {
+ if (!this->isValid() || fBackend != BackendApi::kDawn) {
+ return false;
+ }
+ *info = DawnTextureSpecToTextureInfo(fDawnSpec, fSampleCount, fMipmapped);
+ return true;
+ }
+#endif
+
+#ifdef SK_METAL
+ bool getMtlTextureInfo(MtlTextureInfo* info) const {
+ if (!this->isValid() || fBackend != BackendApi::kMetal) {
+ return false;
+ }
+ *info = MtlTextureSpecToTextureInfo(fMtlSpec, fSampleCount, fMipmapped);
+ return true;
+ }
+#endif
+
+#ifdef SK_VULKAN
+ bool getVulkanTextureInfo(VulkanTextureInfo* info) const {
+ if (!this->isValid() || fBackend != BackendApi::kVulkan) {
+ return false;
+ }
+ *info = VulkanTextureSpecToTextureInfo(fVkSpec, fSampleCount, fMipmapped);
+ return true;
+ }
+#endif
+
+private:
+#ifdef SK_DAWN
+ friend class DawnCaps;
+ friend class DawnCommandBuffer;
+ friend class DawnGraphicsPipeline;
+ friend class DawnResourceProvider;
+ friend class DawnTexture;
+ const DawnTextureSpec& dawnTextureSpec() const {
+ SkASSERT(fValid && fBackend == BackendApi::kDawn);
+ return fDawnSpec;
+ }
+#endif
+
+#ifdef SK_METAL
+ friend class MtlCaps;
+ friend class MtlGraphicsPipeline;
+ friend class MtlTexture;
+ const MtlTextureSpec& mtlTextureSpec() const {
+ SkASSERT(fValid && fBackend == BackendApi::kMetal);
+ return fMtlSpec;
+ }
+#endif
+
+#ifdef SK_VULKAN
+ friend class VulkanCaps;
+ friend class VulkanTexture;
+ const VulkanTextureSpec& vulkanTextureSpec() const {
+ SkASSERT(fValid && fBackend == BackendApi::kVulkan);
+ return fVkSpec;
+ }
+#endif
+
+ BackendApi fBackend = BackendApi::kMock;
+ bool fValid = false;
+
+ uint32_t fSampleCount = 1;
+ Mipmapped fMipmapped = Mipmapped::kNo;
+ Protected fProtected = Protected::kNo;
+
+ union {
+#ifdef SK_DAWN
+ DawnTextureSpec fDawnSpec;
+#endif
+#ifdef SK_METAL
+ MtlTextureSpec fMtlSpec;
+#endif
+#ifdef SK_VULKAN
+ VulkanTextureSpec fVkSpec;
+#endif
+ };
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_TextureInfo_DEFINED
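+
+// Illustrative sketch (not part of the header): wrapping a backend-specific description in a
+// TextureInfo and reading it back. The Vulkan path is shown; under SK_DAWN or SK_METAL the
+// corresponding constructor and getter apply.
+//
+//   skgpu::graphite::VulkanTextureInfo vkInfo;
+//   vkInfo.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
+//   vkInfo.fImageUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+//
+//   skgpu::graphite::TextureInfo info(vkInfo);
+//   SkASSERT(info.isValid() && info.backend() == skgpu::graphite::BackendApi::kVulkan);
+//
+//   skgpu::graphite::VulkanTextureInfo readBack;
+//   if (info.getVulkanTextureInfo(&readBack)) {
+//       // readBack now mirrors vkInfo, plus the defaulted fields.
+//   }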
diff --git a/gfx/skia/skia/include/gpu/graphite/YUVABackendTextures.h b/gfx/skia/skia/include/gpu/graphite/YUVABackendTextures.h
new file mode 100644
index 0000000000..c3b80ae196
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/YUVABackendTextures.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_YUVABackendTextures_DEFINED
+#define skgpu_graphite_YUVABackendTextures_DEFINED
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkYUVAInfo.h"
+#include "include/gpu/graphite/BackendTexture.h"
+
+#include <tuple>
+
+namespace skgpu::graphite {
+class Recorder;
+
+/**
+ * A description of a set of BackendTextures that hold the planar data described by a SkYUVAInfo.
+ */
+class SK_API YUVABackendTextureInfo {
+public:
+ static constexpr auto kMaxPlanes = SkYUVAInfo::kMaxPlanes;
+
+ /** Default YUVABackendTextureInfo is invalid. */
+ YUVABackendTextureInfo() = default;
+ YUVABackendTextureInfo(const YUVABackendTextureInfo&) = default;
+ YUVABackendTextureInfo& operator=(const YUVABackendTextureInfo&) = default;
+
+ /**
+ * Initializes a YUVABackendTextureInfo to describe a set of textures that can store the
+ * planes indicated by the SkYUVAInfo. The texture dimensions are taken from the SkYUVAInfo's
+ * plane dimensions. All the described textures share a common origin. The planar image this
+ * describes will be mipmapped if all the textures are individually mipmapped as indicated
+ * by Mipmapped. This will produce an invalid result (return false from isValid()) if the
+ * passed formats' channels don't agree with SkYUVAInfo.
+ */
+ YUVABackendTextureInfo(const Recorder*,
+ const SkYUVAInfo&,
+ const TextureInfo[kMaxPlanes],
+ Mipmapped);
+
+ bool operator==(const YUVABackendTextureInfo&) const;
+ bool operator!=(const YUVABackendTextureInfo& that) const { return !(*this == that); }
+
+ /** TextureInfo for the ith plane, or invalid if i >= numPlanes() */
+ const TextureInfo& planeTextureInfo(int i) const {
+ SkASSERT(i >= 0);
+ return fPlaneTextureInfos[static_cast<size_t>(i)];
+ }
+
+ const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; }
+
+ SkYUVColorSpace yuvColorSpace() const { return fYUVAInfo.yuvColorSpace(); }
+
+ Mipmapped mipmapped() const { return fMipmapped; }
+
+ /** The number of planes, 0 if this YUVABackendTextureInfo is invalid. */
+ int numPlanes() const { return fYUVAInfo.numPlanes(); }
+
+ /**
+ * Returns true if this has been configured with a valid SkYUVAInfo with compatible texture
+ * formats.
+ */
+ bool isValid() const { return fYUVAInfo.isValid(); }
+
+ /**
+ * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be
+ * valid if this->isValid().
+ */
+ SkYUVAInfo::YUVALocations toYUVALocations() const;
+
+private:
+ SkYUVAInfo fYUVAInfo;
+ std::array<TextureInfo, kMaxPlanes> fPlaneTextureInfos;
+ std::array<uint32_t, kMaxPlanes> fPlaneChannelMasks;
+ Mipmapped fMipmapped = Mipmapped::kNo;
+};
+
+/**
+ * A set of BackendTextures that hold the planar data for an image described by a SkYUVAInfo.
+ */
+class SK_API YUVABackendTextures {
+public:
+ static constexpr auto kMaxPlanes = SkYUVAInfo::kMaxPlanes;
+
+ YUVABackendTextures() = default;
+ YUVABackendTextures(const YUVABackendTextures&) = delete;
+ YUVABackendTextures& operator=(const YUVABackendTextures&) = delete;
+
+ /**
+ * Initializes a YUVABackendTextures object from a set of textures that store the planes
+ * indicated by the SkYUVAInfo. This will produce an invalid result (return false from
+ * isValid()) if the passed texture formats' channels don't agree with SkYUVAInfo.
+ */
+ YUVABackendTextures(const Recorder*,
+ const SkYUVAInfo&,
+ const BackendTexture[kMaxPlanes]);
+
+ SkSpan<const BackendTexture> planeTextures() const {
+ return SkSpan<const BackendTexture>(fPlaneTextures);
+ }
+
+ /** BackendTexture for the ith plane, or invalid if i >= numPlanes() */
+ BackendTexture planeTexture(int i) const {
+ SkASSERT(i >= 0);
+ return fPlaneTextures[static_cast<size_t>(i)];
+ }
+
+ const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; }
+
+ SkYUVColorSpace yuvColorSpace() const { return fYUVAInfo.yuvColorSpace(); }
+
+ /** The number of planes, 0 if this YUVABackendTextures is invalid. */
+ int numPlanes() const { return fYUVAInfo.numPlanes(); }
+
+ /**
+ * Returns true if this has been configured with a valid SkYUVAInfo with compatible texture
+ * formats.
+ */
+ bool isValid() const { return fYUVAInfo.isValid(); }
+
+ /**
+ * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be
+ * valid if this->isValid().
+ */
+ SkYUVAInfo::YUVALocations toYUVALocations() const;
+
+private:
+ SkYUVAInfo fYUVAInfo;
+ std::array<BackendTexture, kMaxPlanes> fPlaneTextures;
+ std::array<uint32_t, kMaxPlanes> fPlaneChannelMasks;
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_YUVABackendTextures_DEFINED
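+
+// Illustrative sketch (not part of the header): describing an NV12 image (one Y plane plus one
+// interleaved UV plane) backed by two textures. 'recorder' and the two BackendTextures are
+// assumed to exist already.
+//
+//   SkYUVAInfo yuvaInfo({1920, 1080},
+//                       SkYUVAInfo::PlaneConfig::kY_UV,
+//                       SkYUVAInfo::Subsampling::k420,
+//                       kRec709_SkYUVColorSpace);
+//
+//   skgpu::graphite::BackendTexture planes[SkYUVAInfo::kMaxPlanes];
+//   planes[0] = yPlaneTexture;   // e.g. R8 texture, 1920x1080
+//   planes[1] = uvPlaneTexture;  // e.g. RG8 texture, 960x540
+//
+//   skgpu::graphite::YUVABackendTextures yuvaTextures(recorder, yuvaInfo, planes);
+//   if (yuvaTextures.isValid()) {
+//       // yuvaTextures.numPlanes() == 2; ready to hand to a YUVA-aware image factory.
+//   }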
diff --git a/gfx/skia/skia/include/gpu/graphite/dawn/DawnBackendContext.h b/gfx/skia/skia/include/gpu/graphite/dawn/DawnBackendContext.h
new file mode 100644
index 0000000000..99282c4d76
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/dawn/DawnBackendContext.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_DawnBackendContext_DEFINED
+#define skgpu_graphite_DawnBackendContext_DEFINED
+
+#include "webgpu/webgpu_cpp.h"
+
+namespace skgpu::graphite {
+
+// The DawnBackendContext contains all of the base Dawn objects needed by the graphite Dawn
+// backend. The client will create this object and pass it into the Context::MakeDawn factory call
+// when setting up Skia.
+struct SK_API DawnBackendContext {
+ wgpu::Device fDevice;
+ wgpu::Queue fQueue;
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_DawnBackendContext_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/dawn/DawnTypes.h b/gfx/skia/skia/include/gpu/graphite/dawn/DawnTypes.h
new file mode 100644
index 0000000000..291be75630
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/dawn/DawnTypes.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_DawnTypes_DEFINED
+#define skgpu_graphite_DawnTypes_DEFINED
+
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "webgpu/webgpu_cpp.h"
+
+namespace skgpu::graphite {
+
+struct DawnTextureInfo {
+ uint32_t fSampleCount = 1;
+ Mipmapped fMipmapped = Mipmapped::kNo;
+
+ // wgpu::TextureDescriptor properties
+ wgpu::TextureFormat fFormat = wgpu::TextureFormat::Undefined;
+ wgpu::TextureUsage fUsage = wgpu::TextureUsage::None;
+
+ DawnTextureInfo() = default;
+ DawnTextureInfo(const wgpu::Texture& texture);
+ DawnTextureInfo(uint32_t sampleCount,
+ Mipmapped mipmapped,
+ wgpu::TextureFormat format,
+ wgpu::TextureUsage usage)
+ : fSampleCount(sampleCount)
+ , fMipmapped(mipmapped)
+ , fFormat(format)
+ , fUsage(usage) {}
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_DawnTypes_DEFINED
+
+
diff --git a/gfx/skia/skia/include/gpu/graphite/dawn/DawnUtils.h b/gfx/skia/skia/include/gpu/graphite/dawn/DawnUtils.h
new file mode 100644
index 0000000000..ef1b57c9e0
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/dawn/DawnUtils.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_DawnUtils_DEFINED
+#define skgpu_graphite_DawnUtils_DEFINED
+
+#include <memory>
+
+namespace skgpu::graphite {
+
+class Context;
+struct ContextOptions;
+struct DawnBackendContext;
+
+namespace ContextFactory {
+std::unique_ptr<Context> MakeDawn(const DawnBackendContext&, const ContextOptions&);
+} // namespace ContextFactory
+
+} // namespace skgpu::graphite
+
+
+#endif // skgpu_graphite_DawnUtils_DEFINED
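+
+// Illustrative sketch (not part of the header): handing Dawn objects to Graphite. Creating the
+// wgpu::Device is Dawn-side setup and is assumed here; only the Skia-facing calls come from
+// these headers.
+//
+//   skgpu::graphite::DawnBackendContext backendContext;
+//   backendContext.fDevice = device;             // a previously created wgpu::Device
+//   backendContext.fQueue  = device.GetQueue();
+//
+//   skgpu::graphite::ContextOptions options;
+//   std::unique_ptr<skgpu::graphite::Context> context =
+//           skgpu::graphite::ContextFactory::MakeDawn(backendContext, options);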
diff --git a/gfx/skia/skia/include/gpu/graphite/mtl/MtlBackendContext.h b/gfx/skia/skia/include/gpu/graphite/mtl/MtlBackendContext.h
new file mode 100644
index 0000000000..9d6d0192d1
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/mtl/MtlBackendContext.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_MtlBackendContext_DEFINED
+#define skgpu_graphite_MtlBackendContext_DEFINED
+
+#include "include/gpu/graphite/mtl/MtlGraphiteTypes.h"
+
+namespace skgpu::graphite {
+
+// The MtlBackendContext contains all of the base Metal objects needed by the graphite Metal
+// backend. The client will create this object and pass it into the Context::MakeMetal factory call
+// when setting up Skia.
+struct SK_API MtlBackendContext {
+ sk_cfp<CFTypeRef> fDevice;
+ sk_cfp<CFTypeRef> fQueue;
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_MtlBackendContext_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteTypes.h b/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteTypes.h
new file mode 100644
index 0000000000..bc04421643
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteTypes.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_MtlGraphiteTypes_DEFINED
+#define skgpu_graphite_MtlGraphiteTypes_DEFINED
+
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/ports/SkCFObject.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef __APPLE__
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <TargetConditionals.h>
+
+#if TARGET_OS_SIMULATOR
+#define SK_API_AVAILABLE_CA_METAL_LAYER SK_API_AVAILABLE(macos(10.11), ios(13.0))
+#else // TARGET_OS_SIMULATOR
+#define SK_API_AVAILABLE_CA_METAL_LAYER SK_API_AVAILABLE(macos(10.11), ios(8.0))
+#endif // TARGET_OS_SIMULATOR
+
+#endif // __APPLE__
+
+
+namespace skgpu::graphite {
+
+/**
+ * Declares typedefs for Metal types used in Graphite cpp code
+ */
+using MtlPixelFormat = unsigned int;
+using MtlTextureUsage = unsigned int;
+using MtlStorageMode = unsigned int;
+using MtlHandle = const void*;
+
+struct MtlTextureInfo {
+ uint32_t fSampleCount = 1;
+ skgpu::Mipmapped fMipmapped = skgpu::Mipmapped::kNo;
+
+ // Since we aren't in an Obj-C header we can't directly use Mtl types here. Each of these can
+ // be cast to its mapped Mtl type, listed below.
+ MtlPixelFormat fFormat = 0; // MTLPixelFormat fFormat = MTLPixelFormatInvalid;
+ MtlTextureUsage fUsage = 0; // MTLTextureUsage fUsage = MTLTextureUsageUnknown;
+ MtlStorageMode fStorageMode = 0; // MTLStorageMode fStorageMode = MTLStorageModeShared;
+ bool fFramebufferOnly = false;
+
+ MtlTextureInfo() = default;
+ MtlTextureInfo(MtlHandle mtlTexture);
+ MtlTextureInfo(uint32_t sampleCount,
+ skgpu::Mipmapped mipmapped,
+ MtlPixelFormat format,
+ MtlTextureUsage usage,
+ MtlStorageMode storageMode,
+ bool framebufferOnly)
+ : fSampleCount(sampleCount)
+ , fMipmapped(mipmapped)
+ , fFormat(format)
+ , fUsage(usage)
+ , fStorageMode(storageMode)
+ , fFramebufferOnly(framebufferOnly) {}
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_MtlGraphiteTypes_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteUtils.h b/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteUtils.h
new file mode 100644
index 0000000000..681f0867ae
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteUtils.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_MtlGraphiteUtils_DEFINED
+#define skgpu_graphite_MtlGraphiteUtils_DEFINED
+
+#include <memory>
+
+namespace skgpu::graphite {
+
+class Context;
+struct ContextOptions;
+struct MtlBackendContext;
+
+namespace ContextFactory {
+std::unique_ptr<Context> MakeMetal(const MtlBackendContext&, const ContextOptions&);
+} // namespace ContextFactory
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_MtlGraphiteUtils_DEFINED
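+
+// Illustrative sketch (not part of the header, Objective-C++): retaining the Metal device and
+// queue into MtlBackendContext's sk_cfp<CFTypeRef> slots, then creating a Graphite Context.
+// sk_cfp::retain() is assumed from SkCFObject.h.
+//
+//   id<MTLDevice> device = MTLCreateSystemDefaultDevice();
+//   id<MTLCommandQueue> queue = [device newCommandQueue];
+//
+//   skgpu::graphite::MtlBackendContext backendContext = {};
+//   backendContext.fDevice.retain((__bridge CFTypeRef)device);
+//   backendContext.fQueue.retain((__bridge CFTypeRef)queue);
+//
+//   skgpu::graphite::ContextOptions options;
+//   std::unique_ptr<skgpu::graphite::Context> context =
+//           skgpu::graphite::ContextFactory::MakeMetal(backendContext, options);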
diff --git a/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteTypes.h b/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteTypes.h
new file mode 100644
index 0000000000..bd448d2ca6
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteTypes.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_VulkanGraphiteTypes_DEFINED
+#define skgpu_graphite_VulkanGraphiteTypes_DEFINED
+
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/vk/VulkanTypes.h"
+
+namespace skgpu::graphite {
+
+struct VulkanTextureInfo {
+ uint32_t fSampleCount = 1;
+ Mipmapped fMipmapped = Mipmapped::kNo;
+
+ // VkImageCreateInfo properties
+ // Currently the only supported flag is VK_IMAGE_CREATE_PROTECTED_BIT. Any other flag will not
+ // be accepted
+ VkImageCreateFlags fFlags = 0;
+ VkFormat fFormat = VK_FORMAT_UNDEFINED;
+ VkImageTiling fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ VkImageUsageFlags fImageUsageFlags = 0;
+ VkSharingMode fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+
+ // Properties related to the image view and sampling. These are less inherent properties of the
+ // VkImage but describe how the VkImage should be used within Skia.
+
+ // What aspect to use for the VkImageView. The normal, default is VK_IMAGE_ASPECT_COLOR_BIT.
+ // However, if the VkImage is a Ycbcr format, the client can pass a specific plane here to have
+ // Skia directly sample a plane. In that case the client should also pass in a VkFormat that is
+ // compatible with the plane as described by the Vulkan spec.
+ VkImageAspectFlags fAspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ // TODO: Either Make the ycbcr conversion info shareable with Ganesh or add a version for
+ // Graphite.
+ // GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+
+ VulkanTextureInfo() = default;
+ VulkanTextureInfo(uint32_t sampleCount,
+ Mipmapped mipmapped,
+ VkImageCreateFlags flags,
+ VkFormat format,
+ VkImageTiling imageTiling,
+ VkImageUsageFlags imageUsageFlags,
+ VkSharingMode sharingMode,
+ VkImageAspectFlags aspectMask)
+ : fSampleCount(sampleCount)
+ , fMipmapped(mipmapped)
+ , fFlags(flags)
+ , fFormat(format)
+ , fImageTiling(imageTiling)
+ , fImageUsageFlags(imageUsageFlags)
+ , fSharingMode(sharingMode)
+ , fAspectMask(aspectMask) {}
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_VulkanGraphiteTypes_DEFINED
+
+
diff --git a/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteUtils.h b/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteUtils.h
new file mode 100644
index 0000000000..07c76a332d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteUtils.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_VulkanGraphiteUtils_DEFINED
+#define skgpu_graphite_VulkanGraphiteUtils_DEFINED
+
+#include <memory>
+
+namespace skgpu { struct VulkanBackendContext; }
+
+namespace skgpu::graphite {
+
+class Context;
+struct ContextOptions;
+
+namespace ContextFactory {
+std::unique_ptr<Context> MakeVulkan(const VulkanBackendContext&, const ContextOptions&);
+} // namespace ContextFactory
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_VulkanGraphiteUtils_DEFINED
diff --git a/gfx/skia/skia/include/gpu/mock/GrMockTypes.h b/gfx/skia/skia/include/gpu/mock/GrMockTypes.h
new file mode 100644
index 0000000000..dfa648086c
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/mock/GrMockTypes.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockOptions_DEFINED
+#define GrMockOptions_DEFINED
+
+#include "include/core/SkTextureCompressionType.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+
+class GrBackendFormat;
+
+struct GrMockTextureInfo {
+ GrMockTextureInfo()
+ : fColorType(GrColorType::kUnknown)
+ , fCompressionType(SkTextureCompressionType::kNone)
+ , fID(0) {}
+
+ GrMockTextureInfo(GrColorType colorType,
+ SkTextureCompressionType compressionType,
+ int id)
+ : fColorType(colorType)
+ , fCompressionType(compressionType)
+ , fID(id) {
+ SkASSERT(fID);
+ if (fCompressionType != SkTextureCompressionType::kNone) {
+ SkASSERT(colorType == GrColorType::kUnknown);
+ }
+ }
+
+ bool operator==(const GrMockTextureInfo& that) const {
+ return fColorType == that.fColorType &&
+ fCompressionType == that.fCompressionType &&
+ fID == that.fID;
+ }
+
+ GrBackendFormat getBackendFormat() const;
+
+ SkTextureCompressionType compressionType() const { return fCompressionType; }
+
+ GrColorType colorType() const {
+ SkASSERT(fCompressionType == SkTextureCompressionType::kNone);
+ return fColorType;
+ }
+
+ int id() const { return fID; }
+
+private:
+ GrColorType fColorType;
+ SkTextureCompressionType fCompressionType;
+ int fID;
+};
+
+struct GrMockRenderTargetInfo {
+ GrMockRenderTargetInfo()
+ : fColorType(GrColorType::kUnknown)
+ , fID(0) {}
+
+ GrMockRenderTargetInfo(GrColorType colorType, int id)
+ : fColorType(colorType)
+ , fID(id) {
+ SkASSERT(fID);
+ }
+
+ bool operator==(const GrMockRenderTargetInfo& that) const {
+ return fColorType == that.fColorType &&
+ fID == that.fID;
+ }
+
+ GrBackendFormat getBackendFormat() const;
+
+ GrColorType colorType() const { return fColorType; }
+
+private:
+ GrColorType fColorType;
+ int fID;
+};
+
+struct GrMockSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ GrColorType fColorType = GrColorType::kUnknown;
+ SkTextureCompressionType fCompressionType = SkTextureCompressionType::kNone;
+};
+
+static constexpr int kSkTextureCompressionTypeCount = static_cast<int>(SkTextureCompressionType::kLast) + 1;
+
+/**
+ * A pointer to this type is used as the GrBackendContext when creating a Mock GrContext. It can be
+ * used to specify capability options for the mock context. If nullptr is passed, a
+ * default-constructed GrMockOptions is used.
+ */
+struct GrMockOptions {
+ GrMockOptions() {
+ using Renderability = ConfigOptions::Renderability;
+ // By default RGBA_8888 and BGRA_8888 are texturable and renderable, and
+ // A8 and RGB565 are texturable.
+ fConfigOptions[(int)GrColorType::kRGBA_8888].fRenderability = Renderability::kNonMSAA;
+ fConfigOptions[(int)GrColorType::kRGBA_8888].fTexturable = true;
+ fConfigOptions[(int)GrColorType::kAlpha_8].fTexturable = true;
+ fConfigOptions[(int)GrColorType::kBGR_565].fTexturable = true;
+
+ fConfigOptions[(int)GrColorType::kBGRA_8888] = fConfigOptions[(int)GrColorType::kRGBA_8888];
+
+ fCompressedOptions[(int)SkTextureCompressionType::kETC2_RGB8_UNORM].fTexturable = true;
+ fCompressedOptions[(int)SkTextureCompressionType::kBC1_RGB8_UNORM].fTexturable = true;
+ fCompressedOptions[(int)SkTextureCompressionType::kBC1_RGBA8_UNORM].fTexturable = true;
+ }
+
+ struct ConfigOptions {
+ enum Renderability { kNo, kNonMSAA, kMSAA };
+ Renderability fRenderability = kNo;
+ bool fTexturable = false;
+ };
+
+ // GrCaps options.
+ bool fMipmapSupport = false;
+ bool fDrawInstancedSupport = false;
+ bool fHalfFloatVertexAttributeSupport = false;
+ uint32_t fMapBufferFlags = 0;
+ int fMaxTextureSize = 2048;
+ int fMaxRenderTargetSize = 2048;
+ int fMaxWindowRectangles = 0;
+ int fMaxVertexAttributes = 16;
+ ConfigOptions fConfigOptions[kGrColorTypeCnt];
+ ConfigOptions fCompressedOptions[kSkTextureCompressionTypeCount];
+
+ // GrShaderCaps options.
+ bool fIntegerSupport = false;
+ bool fFlatInterpolationSupport = false;
+ int fMaxVertexSamplers = 0;
+ int fMaxFragmentSamplers = 8;
+ bool fShaderDerivativeSupport = true;
+ bool fDualSourceBlendingSupport = false;
+
+ // GrMockGpu options.
+ bool fFailTextureAllocations = false;
+};
+
+#endif
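+
+// Illustrative sketch (not part of the header): creating a mock context for testing, with one
+// extra capability switched on over the defaults. GrDirectContext::MakeMock() is declared in
+// GrDirectContext.h.
+//
+//   GrMockOptions mockOptions;
+//   mockOptions.fMipmapSupport = true;  // the default leaves this off
+//
+//   GrContextOptions ctxOptions;
+//   sk_sp<GrDirectContext> dContext = GrDirectContext::MakeMock(&mockOptions, ctxOptions);
+//   // Passing nullptr instead of &mockOptions uses a default-constructed GrMockOptions,
+//   // as described above.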
diff --git a/gfx/skia/skia/include/gpu/mtl/GrMtlBackendContext.h b/gfx/skia/skia/include/gpu/mtl/GrMtlBackendContext.h
new file mode 100644
index 0000000000..0d88f479ac
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/mtl/GrMtlBackendContext.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlBackendContext_DEFINED
+#define GrMtlBackendContext_DEFINED
+
+#include "include/gpu/mtl/GrMtlTypes.h"
+
+// The BackendContext contains all of the base Metal objects needed by the GrMtlGpu. The assumption
+// is that the client will set these up and pass them to the GrMtlGpu constructor.
+struct SK_API GrMtlBackendContext {
+ sk_cfp<GrMTLHandle> fDevice;
+ sk_cfp<GrMTLHandle> fQueue;
+ sk_cfp<GrMTLHandle> fBinaryArchive;
+};
+
+#endif
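+
+// Illustrative sketch (not part of the header, Objective-C++): the setup described above.
+// GrDirectContext::MakeMetal() is declared in GrDirectContext.h; sk_cfp::retain() is assumed
+// from SkCFObject.h.
+//
+//   id<MTLDevice> device = MTLCreateSystemDefaultDevice();
+//   id<MTLCommandQueue> queue = [device newCommandQueue];
+//
+//   GrMtlBackendContext backendContext = {};
+//   backendContext.fDevice.retain((__bridge GrMTLHandle)device);
+//   backendContext.fQueue.retain((__bridge GrMTLHandle)queue);
+//
+//   sk_sp<GrDirectContext> dContext = GrDirectContext::MakeMetal(backendContext);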
diff --git a/gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h b/gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h
new file mode 100644
index 0000000000..7c0d620e06
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlTypes_DEFINED
+#define GrMtlTypes_DEFINED
+
+#include "include/gpu/GpuTypes.h"
+#include "include/ports/SkCFObject.h"
+
+/**
+ * Declares typedefs for Metal types used in Ganesh cpp code
+ */
+using GrMTLPixelFormat = unsigned int;
+using GrMTLTextureUsage = unsigned int;
+using GrMTLStorageMode = unsigned int;
+using GrMTLHandle = const void*;
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef __APPLE__
+
+#include <TargetConditionals.h>
+
+#if TARGET_OS_SIMULATOR
+#define SK_API_AVAILABLE_CA_METAL_LAYER SK_API_AVAILABLE(macos(10.11), ios(13.0))
+#else // TARGET_OS_SIMULATOR
+#define SK_API_AVAILABLE_CA_METAL_LAYER SK_API_AVAILABLE(macos(10.11), ios(8.0))
+#endif // TARGET_OS_SIMULATOR
+
+/**
+ * Types for interacting with Metal resources created externally to Skia.
+ * This is used by GrBackendObjects.
+ */
+struct GrMtlTextureInfo {
+public:
+ GrMtlTextureInfo() {}
+
+ sk_cfp<GrMTLHandle> fTexture;
+
+ bool operator==(const GrMtlTextureInfo& that) const {
+ return fTexture == that.fTexture;
+ }
+};
+
+struct GrMtlSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ // Since we aren't in an Obj-C header we can't directly use Mtl types here. Each of these can
+ // be cast to its mapped Mtl type, listed below.
+ GrMTLPixelFormat fFormat = 0; // MTLPixelFormat fFormat = MTLPixelFormatInvalid;
+ GrMTLTextureUsage fUsage = 0; // MTLTextureUsage fUsage = MTLTextureUsageUnknown;
+ GrMTLStorageMode fStorageMode = 0; // MTLStorageMode fStorageMode = MTLStorageModeShared;
+};
+
+#endif
+
+#endif
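+
+// Illustrative sketch (not part of the header, Objective-C++): wrapping an existing MTLTexture
+// as a GrBackendTexture via GrMtlTextureInfo. The GrBackendTexture constructor taking a
+// GrMtlTextureInfo is declared in GrBackendSurface.h; 'texture' is assumed to exist.
+//
+//   GrMtlTextureInfo mtlInfo;
+//   mtlInfo.fTexture.retain((__bridge GrMTLHandle)texture);
+//
+//   GrBackendTexture backendTexture((int)texture.width, (int)texture.height,
+//                                   GrMipmapped::kNo, mtlInfo);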
diff --git a/gfx/skia/skia/include/gpu/mtl/MtlMemoryAllocator.h b/gfx/skia/skia/include/gpu/mtl/MtlMemoryAllocator.h
new file mode 100644
index 0000000000..425c461791
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/mtl/MtlMemoryAllocator.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_MtlMemoryAllocator_DEFINED
+#define skgpu_MtlMemoryAllocator_DEFINED
+
+#ifdef __APPLE__
+
+#ifdef __OBJC__
+#import <Metal/Metal.h>
+#endif
+
+namespace skgpu {
+
+// interface classes for the GPU memory allocator
+class MtlAlloc : public SkRefCnt {
+public:
+ ~MtlAlloc() override = default;
+};
+
+#ifdef __OBJC__
+class MtlMemoryAllocator : public SkRefCnt {
+public:
+ virtual id<MTLBuffer> newBufferWithLength(NSUInteger length, MTLResourceOptions options,
+ sk_sp<MtlAlloc>* allocation) = 0;
+ virtual id<MTLTexture> newTextureWithDescriptor(MTLTextureDescriptor* texDesc,
+ sk_sp<MtlAlloc>* allocation) = 0;
+};
+#endif
+
+} // namespace skgpu
+
+#endif // __APPLE__
+
+#endif // skgpu_MtlMemoryAllocator_DEFINED
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h b/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h
new file mode 100644
index 0000000000..23c1b0deaf
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkBackendContext_DEFINED
+#define GrVkBackendContext_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/gpu/vk/VulkanMemoryAllocator.h"
+
+namespace skgpu { class VulkanExtensions; }
+
+enum GrVkExtensionFlags {
+ kEXT_debug_report_GrVkExtensionFlag = 0x0001,
+ kNV_glsl_shader_GrVkExtensionFlag = 0x0002,
+ kKHR_surface_GrVkExtensionFlag = 0x0004,
+ kKHR_swapchain_GrVkExtensionFlag = 0x0008,
+ kKHR_win32_surface_GrVkExtensionFlag = 0x0010,
+ kKHR_android_surface_GrVkExtensionFlag = 0x0020,
+ kKHR_xcb_surface_GrVkExtensionFlag = 0x0040,
+};
+
+enum GrVkFeatureFlags {
+ kGeometryShader_GrVkFeatureFlag = 0x0001,
+ kDualSrcBlend_GrVkFeatureFlag = 0x0002,
+ kSampleRateShading_GrVkFeatureFlag = 0x0004,
+};
+
+// It is not guaranteed VkPhysicalDeviceFeatures2 will be in the client's header so we forward
+// declare it here to be safe.
+struct VkPhysicalDeviceFeatures2;
+
+// The BackendContext contains all of the base Vulkan objects needed by the GrVkGpu. The assumption
+// is that the client will set these up and pass them to the GrVkGpu constructor. The VkDevice
+// created must support at least one graphics queue, which is passed in as well.
+// The QueueFamilyIndex must match the family of the given queue. It is needed for CommandPool
+// creation, and any GrBackendObjects handed to us (e.g., for wrapped textures) needs to be created
+// in or transitioned to that family. The refs held by members of this struct must be released
+// (either by deleting the struct or manually releasing the refs) before the underlying vulkan
+// device and instance are destroyed.
+struct SK_API GrVkBackendContext {
+ VkInstance fInstance = VK_NULL_HANDLE;
+ VkPhysicalDevice fPhysicalDevice = VK_NULL_HANDLE;
+ VkDevice fDevice = VK_NULL_HANDLE;
+ VkQueue fQueue = VK_NULL_HANDLE;
+ uint32_t fGraphicsQueueIndex = 0;
+ uint32_t fMinAPIVersion = 0; // Deprecated. Use fInstanceVersion
+ // instead.
+ uint32_t fInstanceVersion = 0; // Deprecated. Use fMaxAPIVersion instead.
+ // The max api version set here should match the value set in VkApplicationInfo::apiVersion when
+ // the VkInstance was created.
+ uint32_t fMaxAPIVersion = 0;
+ uint32_t fExtensions = 0; // Deprecated. Use fVkExtensions instead.
+ const skgpu::VulkanExtensions* fVkExtensions = nullptr;
+ uint32_t fFeatures = 0; // Deprecated. Use fDeviceFeatures[2]
+ // instead.
+ // The client can create their VkDevice with either a VkPhysicalDeviceFeatures or
+ // VkPhysicalDeviceFeatures2 struct, thus we have to support taking both. The
+ // VkPhysicalDeviceFeatures2 struct is needed so we know if the client enabled any extension
+ // specific features. If fDeviceFeatures2 is not null then we ignore fDeviceFeatures. If both
+ // fDeviceFeatures and fDeviceFeatures2 are null we will assume no features are enabled.
+ const VkPhysicalDeviceFeatures* fDeviceFeatures = nullptr;
+ const VkPhysicalDeviceFeatures2* fDeviceFeatures2 = nullptr;
+ sk_sp<skgpu::VulkanMemoryAllocator> fMemoryAllocator;
+ skgpu::VulkanGetProc fGetProc = nullptr;
+ // This is deprecated and should be set to false. The client is responsible for managing the
+ // lifetime of the VkInstance and VkDevice objects.
+ bool fOwnsInstanceAndDevice = false;
+ // Indicates that we are working with protected content and all CommandPool and Queue operations
+ // should be done in a protected context.
+ skgpu::Protected fProtectedContext = skgpu::Protected::kNo;
+};
+
+#endif
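+
+// Illustrative sketch (not part of the header): filling in the non-deprecated fields once the
+// client has created its own VkInstance/VkDevice. The Vulkan handles, features struct,
+// extensions object, and getProc are assumed to exist already.
+//
+//   GrVkBackendContext backendContext;
+//   backendContext.fInstance           = instance;
+//   backendContext.fPhysicalDevice     = physicalDevice;
+//   backendContext.fDevice             = device;
+//   backendContext.fQueue              = graphicsQueue;
+//   backendContext.fGraphicsQueueIndex = graphicsQueueIndex;
+//   backendContext.fMaxAPIVersion      = VK_API_VERSION_1_1;  // match VkApplicationInfo::apiVersion
+//   backendContext.fVkExtensions       = &extensions;         // skgpu::VulkanExtensions
+//   backendContext.fDeviceFeatures2    = &features2;          // features enabled at device creation
+//   backendContext.fGetProc            = getProc;             // skgpu::VulkanGetProc
+//
+//   sk_sp<GrDirectContext> dContext = GrDirectContext::MakeVulkan(backendContext);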
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkExtensions.h b/gfx/skia/skia/include/gpu/vk/GrVkExtensions.h
new file mode 100644
index 0000000000..b32cc16eb5
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkExtensions.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkExtensions_DEFINED
+#define GrVkExtensions_DEFINED
+
+#include "include/gpu/vk/VulkanExtensions.h"
+
+using GrVkExtensions = skgpu::VulkanExtensions;
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h b/gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h
new file mode 100644
index 0000000000..034e1f506c
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkMemoryAllocator_DEFINED
+#define GrVkMemoryAllocator_DEFINED
+
+#include "include/gpu/vk/VulkanMemoryAllocator.h"
+
+using GrVkMemoryAllocator = skgpu::VulkanMemoryAllocator;
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkTypes.h b/gfx/skia/skia/include/gpu/vk/GrVkTypes.h
new file mode 100644
index 0000000000..ae680a8af5
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkTypes.h
@@ -0,0 +1,149 @@
+
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTypes_DEFINED
+#define GrVkTypes_DEFINED
+
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/vk/VulkanTypes.h"
+
+using GrVkBackendMemory = skgpu::VulkanBackendMemory;
+using GrVkAlloc = skgpu::VulkanAlloc;
+
+// This struct is used to pass in the necessary information to create a VkSamplerYcbcrConversion
+// object for a VkExternalFormatANDROID.
+struct GrVkYcbcrConversionInfo {
+ bool operator==(const GrVkYcbcrConversionInfo& that) const {
+ // Invalid objects are not required to have all other fields initialized or matching.
+ if (!this->isValid() && !that.isValid()) {
+ return true;
+ }
+ return this->fFormat == that.fFormat &&
+ this->fExternalFormat == that.fExternalFormat &&
+ this->fYcbcrModel == that.fYcbcrModel &&
+ this->fYcbcrRange == that.fYcbcrRange &&
+ this->fXChromaOffset == that.fXChromaOffset &&
+ this->fYChromaOffset == that.fYChromaOffset &&
+ this->fChromaFilter == that.fChromaFilter &&
+ this->fForceExplicitReconstruction == that.fForceExplicitReconstruction;
+ }
+ bool operator!=(const GrVkYcbcrConversionInfo& that) const { return !(*this == that); }
+
+ bool isValid() const { return fYcbcrModel != VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; }
+
+ // Format of the source image. Must be set to VK_FORMAT_UNDEFINED for external images or
+ // a valid image format otherwise.
+ VkFormat fFormat = VK_FORMAT_UNDEFINED;
+
+ // The external format. Must be non-zero for external images, zero otherwise.
+ // Should be compatible for use in a VkExternalFormatANDROID struct.
+ uint64_t fExternalFormat = 0;
+
+ VkSamplerYcbcrModelConversion fYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
+ VkSamplerYcbcrRange fYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
+ VkChromaLocation fXChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
+ VkChromaLocation fYChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
+ VkFilter fChromaFilter = VK_FILTER_NEAREST;
+ VkBool32 fForceExplicitReconstruction = false;
+
+ // For external images, format features here should be those returned by a call to
+ // vkAndroidHardwareBufferFormatPropertiesANDROID
+ VkFormatFeatureFlags fFormatFeatures = 0;
+};
+
+/*
+ * When wrapping a GrBackendTexture or GrBackendRenderTarget, the fCurrentQueueFamily should
+ * either be VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_EXTERNAL, or VK_QUEUE_FAMILY_FOREIGN_EXT. If
+ * fSharingMode is VK_SHARING_MODE_EXCLUSIVE then fCurrentQueueFamily can also be the graphics
+ * queue index passed into Skia.
+ */
+struct GrVkImageInfo {
+ VkImage fImage = VK_NULL_HANDLE;
+ skgpu::VulkanAlloc fAlloc;
+ VkImageTiling fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ VkImageLayout fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ VkFormat fFormat = VK_FORMAT_UNDEFINED;
+ VkImageUsageFlags fImageUsageFlags = 0;
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ uint32_t fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+ GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+ VkSharingMode fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ bool fPartOfSwapchainOrAndroidWindow = false;
+#endif
+
+#if GR_TEST_UTILS
+ bool operator==(const GrVkImageInfo& that) const {
+ bool equal = fImage == that.fImage && fAlloc == that.fAlloc &&
+ fImageTiling == that.fImageTiling &&
+ fImageLayout == that.fImageLayout &&
+ fFormat == that.fFormat &&
+ fImageUsageFlags == that.fImageUsageFlags &&
+ fSampleCount == that.fSampleCount &&
+ fLevelCount == that.fLevelCount &&
+ fCurrentQueueFamily == that.fCurrentQueueFamily &&
+ fProtected == that.fProtected &&
+ fYcbcrConversionInfo == that.fYcbcrConversionInfo &&
+ fSharingMode == that.fSharingMode;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ equal = equal && (fPartOfSwapchainOrAndroidWindow == that.fPartOfSwapchainOrAndroidWindow);
+#endif
+ return equal;
+ }
+#endif
+};
+
+using GrVkGetProc = skgpu::VulkanGetProc;
+
+/**
+ * This object is wrapped in a GrBackendDrawableInfo and passed in as an argument to
+ * drawBackendGpu() calls on an SkDrawable. The drawable will use this info to inject direct
+ * Vulkan calls into our stream of GPU draws.
+ *
+ * The SkDrawable is given a secondary VkCommandBuffer in which to record draws. The GPU backend
+ * will then execute that command buffer within a render pass it is using for its own draws. The
+ * drawable is also given the attachment of the color index, a compatible VkRenderPass, and the
+ * VkFormat of the color attachment so that it can make VkPipeline objects for the draws. The
+ * SkDrawable must not alter the state of the VkRenderPass or subpass.
+ *
+ * Additionally, the SkDrawable may fill in the passed in fDrawBounds with the bounds of the draws
+ * that it submits to the command buffer. This will be used by the GPU backend for setting the
+ * bounds in vkCmdBeginRenderPass. If fDrawBounds is not updated, we will assume that the entire
+ * attachment may have been written to.
+ *
+ * The SkDrawable is always allowed to create its own command buffers and submit them to the queue
+ * to render offscreen textures which will be sampled in draws added to the passed in
+ * VkCommandBuffer. If this is done the SkDrawable is in charge of adding the required memory
+ * barriers to the queue for the sampled images since the Skia backend will not do this.
+ */
+struct GrVkDrawableInfo {
+ VkCommandBuffer fSecondaryCommandBuffer;
+ uint32_t fColorAttachmentIndex;
+ VkRenderPass fCompatibleRenderPass;
+ VkFormat fFormat;
+ VkRect2D* fDrawBounds;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ bool fFromSwapchainOrAndroidWindow;
+#endif
+};
+
+struct GrVkSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ VkImageTiling fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ VkFormat fFormat = VK_FORMAT_UNDEFINED;
+ VkImageUsageFlags fImageUsageFlags = 0;
+ GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+ VkSharingMode fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+};
+
+#endif
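+
+// Illustrative sketch (not part of the header): describing an existing VkImage with GrVkImageInfo
+// so it can be wrapped as a GrBackendTexture (constructor declared in GrBackendSurface.h). The
+// VkImage and its allocation are assumed to have been created by the client.
+//
+//   GrVkImageInfo imageInfo;
+//   imageInfo.fImage              = vkImage;
+//   imageInfo.fAlloc              = alloc;  // skgpu::VulkanAlloc from the memory allocator
+//   imageInfo.fImageLayout        = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+//   imageInfo.fFormat             = VK_FORMAT_R8G8B8A8_UNORM;
+//   imageInfo.fImageUsageFlags    = VK_IMAGE_USAGE_SAMPLED_BIT;
+//   imageInfo.fLevelCount         = 1;
+//   imageInfo.fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;  // see the comment above
+//
+//   GrBackendTexture backendTexture(width, height, imageInfo);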
diff --git a/gfx/skia/skia/include/gpu/vk/VulkanBackendContext.h b/gfx/skia/skia/include/gpu/vk/VulkanBackendContext.h
new file mode 100644
index 0000000000..c78e2de0c9
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/VulkanBackendContext.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_VulkanBackendContext_DEFINED
+#define skgpu_VulkanBackendContext_DEFINED
+
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/vk/VulkanMemoryAllocator.h"
+#include "include/gpu/vk/VulkanTypes.h"
+
+namespace skgpu {
+
+class VulkanExtensions;
+
+// The VulkanBackendContext contains all of the base Vulkan objects needed by the Skia Vulkan context.
+struct SK_API VulkanBackendContext {
+ VkInstance fInstance;
+ VkPhysicalDevice fPhysicalDevice;
+ VkDevice fDevice;
+ VkQueue fQueue;
+ uint32_t fGraphicsQueueIndex;
+ // The max api version set here should match the value set in VkApplicationInfo::apiVersion when
+ // the VkInstance was created.
+ uint32_t fMaxAPIVersion;
+ const skgpu::VulkanExtensions* fVkExtensions = nullptr;
+ // The client can create their VkDevice with either a VkPhysicalDeviceFeatures or
+ // VkPhysicalDeviceFeatures2 struct, thus we have to support taking both. The
+ // VkPhysicalDeviceFeatures2 struct is needed so we know if the client enabled any extension
+ // specific features. If fDeviceFeatures2 is not null then we ignore fDeviceFeatures. If both
+ // fDeviceFeatures and fDeviceFeatures2 are null we will assume no features are enabled.
+ const VkPhysicalDeviceFeatures* fDeviceFeatures = nullptr;
+ const VkPhysicalDeviceFeatures2* fDeviceFeatures2 = nullptr;
+ // Optional. The client may provide an implementation of a VulkanMemoryAllocator for Skia to use
+ // for allocating Vulkan resources that use VkDeviceMemory.
+ sk_sp<VulkanMemoryAllocator> fMemoryAllocator;
+ skgpu::VulkanGetProc fGetProc;
+ Protected fProtectedContext;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_VulkanBackendContext_DEFINED
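+
+// Illustrative sketch (not part of the header): the Graphite analogue of the Ganesh setup in
+// GrVkBackendContext.h, using ContextFactory::MakeVulkan from graphite/vk/VulkanGraphiteUtils.h.
+// The Vulkan handles and getProc are assumed to exist already.
+//
+//   skgpu::VulkanBackendContext backendContext;
+//   backendContext.fInstance           = instance;
+//   backendContext.fPhysicalDevice     = physicalDevice;
+//   backendContext.fDevice             = device;
+//   backendContext.fQueue              = graphicsQueue;
+//   backendContext.fGraphicsQueueIndex = graphicsQueueIndex;
+//   backendContext.fMaxAPIVersion      = VK_API_VERSION_1_1;
+//   backendContext.fGetProc            = getProc;
+//   backendContext.fProtectedContext   = skgpu::Protected::kNo;
+//
+//   skgpu::graphite::ContextOptions options;
+//   std::unique_ptr<skgpu::graphite::Context> context =
+//           skgpu::graphite::ContextFactory::MakeVulkan(backendContext, options);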
diff --git a/gfx/skia/skia/include/gpu/vk/VulkanExtensions.h b/gfx/skia/skia/include/gpu/vk/VulkanExtensions.h
new file mode 100644
index 0000000000..aea442e491
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/VulkanExtensions.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_VulkanExtensions_DEFINED
+#define skgpu_VulkanExtensions_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/gpu/vk/VulkanTypes.h"
+#include "include/private/base/SkTArray.h"
+
+namespace skgpu {
+
+/**
+ * Helper class that takes in arrays of instance and device extension strings, and allows for
+ * quick querying of whether an extension is present.
+ */
+class SK_API VulkanExtensions {
+public:
+ VulkanExtensions() {}
+
+ void init(VulkanGetProc, VkInstance, VkPhysicalDevice,
+ uint32_t instanceExtensionCount, const char* const* instanceExtensions,
+ uint32_t deviceExtensionCount, const char* const* deviceExtensions);
+
+ bool hasExtension(const char[], uint32_t minVersion) const;
+
+ struct Info {
+ Info() {}
+ Info(const char* name) : fName(name), fSpecVersion(0) {}
+
+ SkString fName;
+ uint32_t fSpecVersion;
+
+ struct Less {
+ bool operator()(const Info& a, const SkString& b) const {
+ return strcmp(a.fName.c_str(), b.c_str()) < 0;
+ }
+ bool operator()(const SkString& a, const VulkanExtensions::Info& b) const {
+ return strcmp(a.c_str(), b.fName.c_str()) < 0;
+ }
+ };
+ };
+
+#ifdef SK_DEBUG
+ void dump() const {
+ SkDebugf("**Vulkan Extensions**\n");
+ for (int i = 0; i < fExtensions.size(); ++i) {
+ SkDebugf("%s. Version: %d\n",
+ fExtensions[i].fName.c_str(), fExtensions[i].fSpecVersion);
+ }
+ SkDebugf("**End Vulkan Extensions**\n");
+ }
+#endif
+
+private:
+ void getSpecVersions(VulkanGetProc getProc, VkInstance, VkPhysicalDevice);
+
+ SkTArray<Info> fExtensions;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_VulkanExtensions_DEFINED
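+
+// Illustrative sketch (not part of the header): initializing the helper from the extension lists
+// the client enabled at instance/device creation, then querying one extension. The counts and
+// name arrays are assumed to be whatever was passed to vkCreateInstance/vkCreateDevice.
+//
+//   skgpu::VulkanExtensions extensions;
+//   extensions.init(getProc, instance, physicalDevice,
+//                   instanceExtensionCount, instanceExtensionNames,
+//                   deviceExtensionCount, deviceExtensionNames);
+//
+//   if (extensions.hasExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME, 1)) {
+//       // Safe to rely on VK_KHR_swapchain at spec version >= 1.
+//   }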
diff --git a/gfx/skia/skia/include/gpu/vk/VulkanMemoryAllocator.h b/gfx/skia/skia/include/gpu/vk/VulkanMemoryAllocator.h
new file mode 100644
index 0000000000..ebaa28ed1b
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/VulkanMemoryAllocator.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_VulkanMemoryAllocator_DEFINED
+#define skgpu_VulkanMemoryAllocator_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/vk/VulkanTypes.h"
+
+namespace skgpu {
+
+class VulkanMemoryAllocator : public SkRefCnt {
+public:
+ enum AllocationPropertyFlags {
+ kNone_AllocationPropertyFlag = 0b0000,
+ // Allocation will be placed in its own VkDeviceMemory and not suballocated from some larger
+ // block.
+ kDedicatedAllocation_AllocationPropertyFlag = 0b0001,
+ // Says that the backing memory can only be accessed by the device. Additionally the device
+ // may lazily allocate the memory. This cannot be used with buffers that will be host
+ // visible. Setting this flag does not guarantee that we will allocate memory that respects
+ // it, but we will try to prefer memory that can respect it.
+ kLazyAllocation_AllocationPropertyFlag = 0b0010,
+ // The allocation will be mapped immediately and stay mapped until it is destroyed. This
+ // flag is only valid for buffers which are host visible (i.e. must have a usage other than
+ // BufferUsage::kGpuOnly).
+ kPersistentlyMapped_AllocationPropertyFlag = 0b0100,
+ // Allocation can only be accessed by the device using a protected context.
+ kProtected_AllocationPropertyFlag = 0b1000,
+ };
+
+ enum class BufferUsage {
+ // Buffers that will only be accessed from the device (large const buffers) will always be
+ // in device local memory.
+ kGpuOnly,
+ // Buffers that typically will be updated multiple times by the host and read on the gpu
+ // (e.g. uniform or vertex buffers). CPU writes will generally be sequential in the buffer
+ // and will try to take advantage of the write-combined nature of the gpu buffers. Thus this
+ // will always be mappable and coherent memory, and it will prefer to be in device local
+ // memory.
+ kCpuWritesGpuReads,
+ // Buffers that will be accessed on the host and copied to another GPU resource (transfer
+ // buffers). Will always be mappable and coherent memory.
+ kTransfersFromCpuToGpu,
+ // Buffers which are typically written to by the GPU and then read on the host. Will always
+ // be mappable memory, and will prefer cached memory.
+ kTransfersFromGpuToCpu,
+ };
+
+ virtual VkResult allocateImageMemory(VkImage image,
+ uint32_t allocationPropertyFlags,
+ skgpu::VulkanBackendMemory* memory) = 0;
+
+ virtual VkResult allocateBufferMemory(VkBuffer buffer,
+ BufferUsage usage,
+ uint32_t allocationPropertyFlags,
+ skgpu::VulkanBackendMemory* memory) = 0;
+
+ // Fills out the passed in skgpu::VulkanAlloc struct for the passed in
+ // skgpu::VulkanBackendMemory.
+ virtual void getAllocInfo(const skgpu::VulkanBackendMemory&, skgpu::VulkanAlloc*) const = 0;
+
+ // Maps the entire allocation and returns a pointer to the start of the allocation. The
+ // implementation may map more memory than just the allocation, but the returned pointer must
+ // point at the start of the memory for the requested allocation.
+ virtual void* mapMemory(const skgpu::VulkanBackendMemory&) { return nullptr; }
+ virtual VkResult mapMemory(const skgpu::VulkanBackendMemory& memory, void** data) {
+ *data = this->mapMemory(memory);
+ // VK_ERROR_INITIALIZATION_FAILED is a bogus result to return from this function, but it is
+ // just something to return that is not VK_SUCCESS and can't be interpreted by a caller to
+ // mean something specific happened like device lost or oom. This will be removed once we
+ // update clients to implement this virtual.
+ return *data ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
+ }
+ virtual void unmapMemory(const skgpu::VulkanBackendMemory&) = 0;
+
+ // The following two calls are used for managing non-coherent memory. The offset is relative to
+ // the start of the allocation and not the underlying VkDeviceMemory. Additionally, the client
+ // must make sure that the offset + size passed in is less than or equal to the allocation size.
+ // It is the responsibility of the implementation to make sure all alignment requirements are
+ // followed. The client should not have to deal with any sort of alignment issues.
+ virtual void flushMappedMemory(const skgpu::VulkanBackendMemory&, VkDeviceSize, VkDeviceSize) {}
+ virtual VkResult flushMemory(const skgpu::VulkanBackendMemory& memory,
+ VkDeviceSize offset,
+ VkDeviceSize size) {
+ this->flushMappedMemory(memory, offset, size);
+ return VK_SUCCESS;
+ }
+ virtual void invalidateMappedMemory(const skgpu::VulkanBackendMemory&,
+ VkDeviceSize,
+ VkDeviceSize) {}
+ virtual VkResult invalidateMemory(const skgpu::VulkanBackendMemory& memory,
+ VkDeviceSize offset,
+ VkDeviceSize size) {
+ this->invalidateMappedMemory(memory, offset, size);
+ return VK_SUCCESS;
+ }
+
+ virtual void freeMemory(const skgpu::VulkanBackendMemory&) = 0;
+
+ // Returns the total amount of memory that is allocated as well as total
+ // amount of memory in use by an allocation from this allocator.
+ // Return 1st param is total allocated memory, 2nd is total used memory.
+ virtual std::pair<uint64_t, uint64_t> totalAllocatedAndUsedMemory() const = 0;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_VulkanMemoryAllocator_DEFINED
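+
+// Illustrative sketch (not part of the header): how a caller writes into a buffer allocation
+// through this interface while honoring the non-coherent rules above. 'allocator', 'memory',
+// 'srcData', and 'srcSize' are assumed to exist.
+//
+//   void* mapped = nullptr;
+//   if (allocator->mapMemory(memory, &mapped) != VK_SUCCESS) {
+//       return;
+//   }
+//   memcpy(mapped, srcData, srcSize);
+//
+//   skgpu::VulkanAlloc alloc;
+//   allocator->getAllocInfo(memory, &alloc);
+//   if (alloc.fFlags & skgpu::VulkanAlloc::kNoncoherent_Flag) {
+//       // Offset is relative to the allocation; alignment is the allocator's problem.
+//       allocator->flushMemory(memory, /*offset=*/0, srcSize);
+//   }
+//   allocator->unmapMemory(memory);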
diff --git a/gfx/skia/skia/include/gpu/vk/VulkanTypes.h b/gfx/skia/skia/include/gpu/vk/VulkanTypes.h
new file mode 100644
index 0000000000..5468c59211
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/VulkanTypes.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_VulkanTypes_DEFINED
+#define skgpu_VulkanTypes_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/gpu/vk/SkiaVulkan.h"
+
+#include <functional>
+
+#ifndef VK_VERSION_1_1
+#error Skia requires the use of Vulkan 1.1 headers
+#endif
+
+namespace skgpu {
+
+using VulkanGetProc = std::function<PFN_vkVoidFunction(
+ const char*, // function name
+ VkInstance, // instance or VK_NULL_HANDLE
+ VkDevice // device or VK_NULL_HANDLE
+ )>;
+
+typedef intptr_t VulkanBackendMemory;
+
+/**
+ * Types for interacting with Vulkan resources created externally to Skia.
+ */
+struct VulkanAlloc {
+ // can be VK_NULL_HANDLE iff this is a render target and it is borrowed
+ VkDeviceMemory fMemory = VK_NULL_HANDLE;
+ VkDeviceSize fOffset = 0;
+ VkDeviceSize fSize = 0; // this can be indeterminate iff the texture uses borrow semantics
+ uint32_t fFlags = 0;
+ // handle to memory allocated via skgpu::VulkanMemoryAllocator.
+ VulkanBackendMemory fBackendMemory = 0;
+
+ enum Flag {
+ kNoncoherent_Flag = 0x1, // memory must be flushed to device after mapping
+ kMappable_Flag = 0x2, // memory is able to be mapped.
+ kLazilyAllocated_Flag = 0x4, // memory was created with lazy allocation
+ };
+
+ bool operator==(const VulkanAlloc& that) const {
+ return fMemory == that.fMemory && fOffset == that.fOffset && fSize == that.fSize &&
+ fFlags == that.fFlags && fUsesSystemHeap == that.fUsesSystemHeap;
+ }
+
+private:
+ bool fUsesSystemHeap = false;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_VulkanTypes_DEFINED
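+
+// Illustrative sketch (not part of the header): a typical VulkanGetProc built on the two
+// standard Vulkan loader entry points.
+//
+//   skgpu::VulkanGetProc getProc = [](const char* name,
+//                                     VkInstance instance,
+//                                     VkDevice device) -> PFN_vkVoidFunction {
+//       if (device != VK_NULL_HANDLE) {
+//           return vkGetDeviceProcAddr(device, name);
+//       }
+//       return vkGetInstanceProcAddr(instance, name);
+//   };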