From 26a029d407be480d791972afb5975cf62c9360a6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Fri, 19 Apr 2024 02:47:55 +0200
Subject: Adding upstream version 124.0.1.

Signed-off-by: Daniel Baumann
---
 dom/webgpu/ipc/PWebGPU.ipdl | 100 +++
 dom/webgpu/ipc/PWebGPUTypes.ipdlh | 26 +
 dom/webgpu/ipc/WebGPUChild.cpp | 270 +++++++
 dom/webgpu/ipc/WebGPUChild.h | 115 +++
 dom/webgpu/ipc/WebGPUParent.cpp | 1557 +++++++++++++++++++++++++++++++++++++
 dom/webgpu/ipc/WebGPUParent.h | 238 ++++++
 dom/webgpu/ipc/WebGPUSerialize.h | 63 ++
 dom/webgpu/ipc/WebGPUTypes.h | 82 ++
 8 files changed, 2451 insertions(+)
 create mode 100644 dom/webgpu/ipc/PWebGPU.ipdl
 create mode 100644 dom/webgpu/ipc/PWebGPUTypes.ipdlh
 create mode 100644 dom/webgpu/ipc/WebGPUChild.cpp
 create mode 100644 dom/webgpu/ipc/WebGPUChild.h
 create mode 100644 dom/webgpu/ipc/WebGPUParent.cpp
 create mode 100644 dom/webgpu/ipc/WebGPUParent.h
 create mode 100644 dom/webgpu/ipc/WebGPUSerialize.h
 create mode 100644 dom/webgpu/ipc/WebGPUTypes.h
 (limited to 'dom/webgpu/ipc')

diff --git a/dom/webgpu/ipc/PWebGPU.ipdl b/dom/webgpu/ipc/PWebGPU.ipdl
new file mode 100644
index 0000000000..5146dd6826
--- /dev/null
+++ b/dom/webgpu/ipc/PWebGPU.ipdl
@@ -0,0 +1,100 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: sw=2 ts=8 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+using mozilla::layers::RGBDescriptor from "mozilla/layers/LayersSurfaces.h";
+using mozilla::layers::RemoteTextureId from "mozilla/layers/LayersTypes.h";
+using mozilla::layers::RemoteTextureOwnerId from "mozilla/layers/LayersTypes.h";
+using mozilla::layers::RemoteTextureTxnType from "mozilla/layers/LayersTypes.h";
+using mozilla::layers::RemoteTextureTxnId from "mozilla/layers/LayersTypes.h";
+using mozilla::webgpu::RawId from "mozilla/webgpu/WebGPUTypes.h";
+using mozilla::dom::GPUErrorFilter from "mozilla/dom/WebGPUBinding.h";
+using mozilla::dom::GPURequestAdapterOptions from "mozilla/dom/WebGPUBinding.h";
+using mozilla::dom::GPUCommandBufferDescriptor from "mozilla/dom/WebGPUBinding.h";
+using mozilla::dom::GPUBufferDescriptor from "mozilla/dom/WebGPUBinding.h";
+using mozilla::webgpu::PopErrorScopeResult from "mozilla/webgpu/WebGPUTypes.h";
+using mozilla::webgpu::WebGPUCompilationMessage from "mozilla/webgpu/WebGPUTypes.h";
+[MoveOnly] using class mozilla::ipc::UnsafeSharedMemoryHandle from "mozilla/ipc/RawShmem.h";
+using struct mozilla::void_t from "mozilla/ipc/IPCCore.h";
+
+include "mozilla/ipc/ByteBufUtils.h";
+include "mozilla/layers/LayersMessageUtils.h";
+include "mozilla/webgpu/WebGPUSerialize.h";
+include "mozilla/layers/WebRenderMessageUtils.h";
+include protocol PCanvasManager;
+include PWebGPUTypes;
+
+namespace mozilla {
+namespace webgpu {
+
+/**
+ * Represents the connection between a WebGPUChild actor that issues WebGPU
+ * commands from the content process, and a WebGPUParent in the compositor
+ * process that runs the commands.
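+ *
+ * Many of the `parent:` messages carry a ByteBuf that was serialized by the
+ * `wgpu_client` FFI in the content process and is replayed by `wgpu_server`
+ * in the compositor process. A rough, non-normative sketch of the child side
+ * (see WebGPUChild.cpp):
+ *
+ *   ipc::ByteBuf bb;                            // filled in by wgpu_client_*
+ *   SendDeviceAction(deviceId, std::move(bb));  // handled by WebGPUParent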
+ */ +async protocol PWebGPU +{ + manager PCanvasManager; + +parent: + async DeviceAction(RawId selfId, ByteBuf buf); + async DeviceActionWithAck(RawId selfId, ByteBuf buf) returns (bool dummy); + async TextureAction(RawId selfId, RawId aDeviceId, ByteBuf buf); + async CommandEncoderAction(RawId selfId, RawId aDeviceId, ByteBuf buf); + async BumpImplicitBindGroupLayout(RawId pipelineId, bool isCompute, uint32_t index, RawId assignId); + + async DeviceCreateBuffer(RawId deviceId, RawId bufferId, GPUBufferDescriptor desc, UnsafeSharedMemoryHandle shm); + + async InstanceRequestAdapter(GPURequestAdapterOptions options, RawId[] ids) returns (ByteBuf byteBuf); + async AdapterRequestDevice(RawId selfId, ByteBuf buf, RawId newId) returns (bool success); + async AdapterDrop(RawId selfId); + // TODO: We want to return an array of compilation messages. + async DeviceCreateShaderModule(RawId selfId, RawId bufferId, nsString label, nsCString code) returns (WebGPUCompilationMessage[] messages); + async BufferMap(RawId deviceId, RawId bufferId, uint32_t aMode, uint64_t offset, uint64_t size) returns (BufferMapResult result); + async BufferUnmap(RawId deviceId, RawId bufferId, bool flush); + async BufferDestroy(RawId selfId); + async BufferDrop(RawId selfId); + async TextureDestroy(RawId selfId, RawId deviceId); + async TextureDrop(RawId selfId); + async TextureViewDrop(RawId selfId); + async SamplerDrop(RawId selfId); + async DeviceDestroy(RawId selfId); + async DeviceDrop(RawId selfId); + + async CommandEncoderFinish(RawId selfId, RawId deviceId, GPUCommandBufferDescriptor desc); + async CommandEncoderDrop(RawId selfId); + async RenderBundleDrop(RawId selfId); + async QueueSubmit(RawId selfId, RawId aDeviceId, RawId[] commandBuffers, RawId[] textureIds); + async QueueOnSubmittedWorkDone(RawId selfId) returns (void_t ok); + async QueueWriteAction(RawId selfId, RawId aDeviceId, ByteBuf buf, UnsafeSharedMemoryHandle shmem); + + async BindGroupLayoutDrop(RawId selfId); + async PipelineLayoutDrop(RawId selfId); + async BindGroupDrop(RawId selfId); + async ShaderModuleDrop(RawId selfId); + async ComputePipelineDrop(RawId selfId); + async RenderPipelineDrop(RawId selfId); + async ImplicitLayoutDrop(RawId implicitPlId, RawId[] implicitBglIds); + async DeviceCreateSwapChain(RawId selfId, RawId queueId, RGBDescriptor desc, RawId[] bufferIds, RemoteTextureOwnerId ownerId, bool useExternalTextureInSwapChain); + async SwapChainPresent(RawId textureId, RawId commandEncoderId, RemoteTextureId remoteTextureId, RemoteTextureOwnerId remoteTextureOwnerId); + async SwapChainDrop(RemoteTextureOwnerId ownerId, RemoteTextureTxnType txnType, RemoteTextureTxnId txnId); + + async DevicePushErrorScope(RawId selfId, GPUErrorFilter aFilter); + async DevicePopErrorScope(RawId selfId) returns (PopErrorScopeResult result); + + // Generate an error on the Device timeline for `deviceId`. + // The `message` parameter is interpreted as UTF-8. + async GenerateError(RawId? deviceId, GPUErrorFilter type, nsCString message); + +child: + async UncapturedError(RawId? aDeviceId, nsCString message); + async DropAction(ByteBuf buf); + async DeviceLost(RawId aDeviceId, uint8_t? reason, nsCString message); + async __delete__(); +}; + +} // webgpu +} // mozilla diff --git a/dom/webgpu/ipc/PWebGPUTypes.ipdlh b/dom/webgpu/ipc/PWebGPUTypes.ipdlh new file mode 100644 index 0000000000..98f062856c --- /dev/null +++ b/dom/webgpu/ipc/PWebGPUTypes.ipdlh @@ -0,0 +1,26 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 
2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+using struct mozilla::null_t from "mozilla/ipc/IPCCore.h";
+
+namespace mozilla {
+namespace webgpu {
+
+struct BufferMapSuccess {
+  uint64_t offset;
+  uint64_t size;
+  bool writable;
+};
+
+struct BufferMapError {
+  nsCString message;
+};
+
+union BufferMapResult {
+  BufferMapSuccess;
+  BufferMapError;
+};
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ipc/WebGPUChild.cpp b/dom/webgpu/ipc/WebGPUChild.cpp
new file mode 100644
index 0000000000..663dd5cb89
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUChild.cpp
@@ -0,0 +1,270 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebGPUChild.h"
+
+#include "js/RootingAPI.h"
+#include "js/String.h"
+#include "js/TypeDecls.h"
+#include "js/Value.h"
+#include "js/Warnings.h"  // JS::WarnUTF8
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/EnumTypeTraits.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/dom/ScriptSettings.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/dom/GPUUncapturedErrorEvent.h"
+#include "mozilla/webgpu/ValidationError.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "Adapter.h"
+#include "DeviceLostInfo.h"
+#include "PipelineLayout.h"
+#include "Sampler.h"
+#include "CompilationInfo.h"
+#include "mozilla/ipc/RawShmem.h"
+#include "Utility.h"
+
+#include <utility>
+
+namespace mozilla::webgpu {
+
+NS_IMPL_CYCLE_COLLECTION(WebGPUChild)
+
+void WebGPUChild::JsWarning(nsIGlobalObject* aGlobal,
+                            const nsACString& aMessage) {
+  const auto& flatString = PromiseFlatCString(aMessage);
+  if (aGlobal) {
+    dom::AutoJSAPI api;
+    if (api.Init(aGlobal)) {
+      JS::WarnUTF8(api.cx(), "%s", flatString.get());
+    }
+  } else {
+    printf_stderr("Validation error without device target: %s\n",
+                  flatString.get());
+  }
+}
+
+static UniquePtr<ffi::WGPUClient> initialize() {
+  ffi::WGPUInfrastructure infra = ffi::wgpu_client_new();
+  return UniquePtr<ffi::WGPUClient>{infra.client};
+}
+
+WebGPUChild::WebGPUChild() : mClient(initialize()) {}
+
+WebGPUChild::~WebGPUChild() = default;
+
+RefPtr<AdapterPromise> WebGPUChild::InstanceRequestAdapter(
+    const dom::GPURequestAdapterOptions& aOptions) {
+  const int max_ids = 10;
+  RawId ids[max_ids] = {0};
+  unsigned long count =
+      ffi::wgpu_client_make_adapter_ids(mClient.get(), ids, max_ids);
+
+  nsTArray<RawId> sharedIds(count);
+  for (unsigned long i = 0; i != count; ++i) {
+    sharedIds.AppendElement(ids[i]);
+  }
+
+  return SendInstanceRequestAdapter(aOptions, sharedIds)
+      ->Then(
+          GetCurrentSerialEventTarget(), __func__,
+          [](ipc::ByteBuf&& aInfoBuf) {
+            // Ideally, we'd just send an empty ByteBuf, but the IPC code
+            // complains if the capacity is zero...
+            // So for the case where an adapter wasn't found, we just
+            // transfer a single 0u64 in this buffer.
+            return aInfoBuf.mLen > sizeof(uint64_t)
+                       ?
AdapterPromise::CreateAndResolve(std::move(aInfoBuf), + __func__) + : AdapterPromise::CreateAndReject(Nothing(), __func__); + }, + [](const ipc::ResponseRejectReason& aReason) { + return AdapterPromise::CreateAndReject(Some(aReason), __func__); + }); +} + +Maybe WebGPUChild::AdapterRequestDevice( + RawId aSelfId, const ffi::WGPUDeviceDescriptor& aDesc) { + RawId id = ffi::wgpu_client_make_device_id(mClient.get(), aSelfId); + + ByteBuf bb; + ffi::wgpu_client_serialize_device_descriptor(&aDesc, ToFFI(&bb)); + + DeviceRequest request; + request.mId = id; + request.mPromise = SendAdapterRequestDevice(aSelfId, std::move(bb), id); + + return Some(std::move(request)); +} + +RawId WebGPUChild::RenderBundleEncoderFinish( + ffi::WGPURenderBundleEncoder& aEncoder, RawId aDeviceId, + const dom::GPURenderBundleDescriptor& aDesc) { + ffi::WGPURenderBundleDescriptor desc = {}; + + webgpu::StringHelper label(aDesc.mLabel); + desc.label = label.Get(); + + ipc::ByteBuf bb; + RawId id = ffi::wgpu_client_create_render_bundle( + mClient.get(), &aEncoder, aDeviceId, &desc, ToFFI(&bb)); + + SendDeviceAction(aDeviceId, std::move(bb)); + + return id; +} + +RawId WebGPUChild::RenderBundleEncoderFinishError(RawId aDeviceId, + const nsString& aLabel) { + webgpu::StringHelper label(aLabel); + + ipc::ByteBuf bb; + RawId id = ffi::wgpu_client_create_render_bundle_error( + mClient.get(), aDeviceId, label.Get(), ToFFI(&bb)); + + SendDeviceAction(aDeviceId, std::move(bb)); + + return id; +} + +ipc::IPCResult WebGPUChild::RecvUncapturedError(const Maybe aDeviceId, + const nsACString& aMessage) { + RefPtr device; + if (aDeviceId) { + const auto itr = mDeviceMap.find(*aDeviceId); + if (itr != mDeviceMap.end()) { + device = itr->second.get(); + MOZ_ASSERT(device); + } + } + if (!device) { + JsWarning(nullptr, aMessage); + } else { + // We don't want to spam the errors to the console indefinitely + if (device->CheckNewWarning(aMessage)) { + JsWarning(device->GetOwnerGlobal(), aMessage); + + dom::GPUUncapturedErrorEventInit init; + init.mError = new ValidationError(device->GetParentObject(), aMessage); + RefPtr event = + dom::GPUUncapturedErrorEvent::Constructor( + device, u"uncapturederror"_ns, init); + device->DispatchEvent(*event); + } + } + return IPC_OK(); +} + +ipc::IPCResult WebGPUChild::RecvDropAction(const ipc::ByteBuf& aByteBuf) { + const auto* byteBuf = ToFFI(&aByteBuf); + ffi::wgpu_client_drop_action(mClient.get(), byteBuf); + return IPC_OK(); +} + +ipc::IPCResult WebGPUChild::RecvDeviceLost(RawId aDeviceId, + Maybe aReason, + const nsACString& aMessage) { + RefPtr device; + const auto itr = mDeviceMap.find(aDeviceId); + if (itr != mDeviceMap.end()) { + device = itr->second.get(); + MOZ_ASSERT(device); + } + + if (device) { + auto message = NS_ConvertUTF8toUTF16(aMessage); + if (aReason.isSome()) { + dom::GPUDeviceLostReason reason = + static_cast(*aReason); + device->ResolveLost(Some(reason), message); + } else { + device->ResolveLost(Nothing(), message); + } + } + return IPC_OK(); +} + +void WebGPUChild::DeviceCreateSwapChain( + RawId aSelfId, const RGBDescriptor& aRgbDesc, size_t maxBufferCount, + const layers::RemoteTextureOwnerId& aOwnerId, + bool aUseExternalTextureInSwapChain) { + RawId queueId = aSelfId; // TODO: multiple queues + nsTArray bufferIds(maxBufferCount); + for (size_t i = 0; i < maxBufferCount; ++i) { + bufferIds.AppendElement( + ffi::wgpu_client_make_buffer_id(mClient.get(), aSelfId)); + } + SendDeviceCreateSwapChain(aSelfId, queueId, aRgbDesc, bufferIds, aOwnerId, + 
aUseExternalTextureInSwapChain); +} + +void WebGPUChild::QueueOnSubmittedWorkDone( + const RawId aSelfId, const RefPtr& aPromise) { + SendQueueOnSubmittedWorkDone(aSelfId)->Then( + GetCurrentSerialEventTarget(), __func__, + [aPromise]() { aPromise->MaybeResolveWithUndefined(); }, + [aPromise](const ipc::ResponseRejectReason& aReason) { + aPromise->MaybeRejectWithNotSupportedError("IPC error"); + }); +} + +void WebGPUChild::SwapChainPresent(RawId aTextureId, + const RemoteTextureId& aRemoteTextureId, + const RemoteTextureOwnerId& aOwnerId) { + // Hack: the function expects `DeviceId`, but it only uses it for `backend()` + // selection. + RawId encoderId = ffi::wgpu_client_make_encoder_id(mClient.get(), aTextureId); + SendSwapChainPresent(aTextureId, encoderId, aRemoteTextureId, aOwnerId); +} + +void WebGPUChild::RegisterDevice(Device* const aDevice) { + mDeviceMap.insert({aDevice->mId, aDevice}); +} + +void WebGPUChild::UnregisterDevice(RawId aDeviceId) { + if (IsOpen()) { + SendDeviceDrop(aDeviceId); + } + mDeviceMap.erase(aDeviceId); +} + +void WebGPUChild::FreeUnregisteredInParentDevice(RawId aId) { + ffi::wgpu_client_kill_device_id(mClient.get(), aId); + mDeviceMap.erase(aId); +} + +void WebGPUChild::ActorDestroy(ActorDestroyReason) { + // Resolving the promise could cause us to update the original map if the + // callee frees the Device objects immediately. Since any remaining entries + // in the map are no longer valid, we can just move the map onto the stack. + const auto deviceMap = std::move(mDeviceMap); + mDeviceMap.clear(); + + for (const auto& targetIter : deviceMap) { + RefPtr device = targetIter.second.get(); + if (!device) { + // The Device may have gotten freed when we resolved the Promise for + // another Device in the map. + continue; + } + + device->ResolveLost(Nothing(), u"WebGPUChild destroyed"_ns); + } +} + +void WebGPUChild::QueueSubmit(RawId aSelfId, RawId aDeviceId, + nsTArray& aCommandBuffers) { + SendQueueSubmit(aSelfId, aDeviceId, aCommandBuffers, + mSwapChainTexturesWaitingForSubmit); + mSwapChainTexturesWaitingForSubmit.Clear(); +} + +void WebGPUChild::NotifyWaitForSubmit(RawId aTextureId) { + mSwapChainTexturesWaitingForSubmit.AppendElement(aTextureId); +} + +} // namespace mozilla::webgpu diff --git a/dom/webgpu/ipc/WebGPUChild.h b/dom/webgpu/ipc/WebGPUChild.h new file mode 100644 index 0000000000..37525420bd --- /dev/null +++ b/dom/webgpu/ipc/WebGPUChild.h @@ -0,0 +1,115 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/
+
+#ifndef WEBGPU_CHILD_H_
+#define WEBGPU_CHILD_H_
+
+#include "mozilla/webgpu/PWebGPUChild.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/WeakPtr.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla {
+namespace ipc {
+class UnsafeSharedMemoryHandle;
+} // namespace ipc
+namespace dom {
+struct GPURequestAdapterOptions;
+} // namespace dom
+namespace layers {
+class CompositorBridgeChild;
+} // namespace layers
+namespace webgpu {
+namespace ffi {
+struct WGPUClient;
+struct WGPULimits;
+struct WGPUTextureViewDescriptor;
+} // namespace ffi
+
+using AdapterPromise =
+    MozPromise<ipc::ByteBuf, Maybe<ipc::ResponseRejectReason>, true>;
+using PipelinePromise = MozPromise<RawId, ipc::ResponseRejectReason, true>;
+using DevicePromise = MozPromise<bool, ipc::ResponseRejectReason, true>;
+
+struct PipelineCreationContext {
+  RawId mParentId = 0;
+  RawId mImplicitPipelineLayoutId = 0;
+  nsTArray<RawId> mImplicitBindGroupLayoutIds;
+};
+
+struct DeviceRequest {
+  RawId mId = 0;
+  RefPtr<DevicePromise> mPromise;
+  // Note: we could put `ffi::WGPULimits` in here as well,
+  // but we don't want to #include ffi stuff in this header
+};
+
+ffi::WGPUByteBuf* ToFFI(ipc::ByteBuf* x);
+
+class WebGPUChild final : public PWebGPUChild, public SupportsWeakPtr {
+ public:
+  friend class layers::CompositorBridgeChild;
+
+  NS_DECL_CYCLE_COLLECTION_NATIVE_CLASS(WebGPUChild)
+  NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING_INHERITED(WebGPUChild)
+
+ public:
+  explicit WebGPUChild();
+
+  bool IsOpen() const { return CanSend(); }
+
+  RefPtr<AdapterPromise> InstanceRequestAdapter(
+      const dom::GPURequestAdapterOptions& aOptions);
+  Maybe<DeviceRequest> AdapterRequestDevice(RawId aSelfId,
+                                            const ffi::WGPUDeviceDescriptor&);
+  RawId RenderBundleEncoderFinish(ffi::WGPURenderBundleEncoder& aEncoder,
+                                  RawId aDeviceId,
+                                  const dom::GPURenderBundleDescriptor& aDesc);
+  RawId RenderBundleEncoderFinishError(RawId aDeviceId, const nsString& aLabel);
+
+  ffi::WGPUClient* GetClient() const { return mClient.get(); }
+
+  void DeviceCreateSwapChain(RawId aSelfId, const RGBDescriptor& aRgbDesc,
+                             size_t maxBufferCount,
+                             const layers::RemoteTextureOwnerId& aOwnerId,
+                             bool aUseExternalTextureInSwapChain);
+
+  void QueueOnSubmittedWorkDone(const RawId aSelfId,
+                                const RefPtr<dom::Promise>& aPromise);
+
+  void SwapChainPresent(RawId aTextureId,
+                        const RemoteTextureId& aRemoteTextureId,
+                        const RemoteTextureOwnerId& aOwnerId);
+
+  void RegisterDevice(Device* const aDevice);
+  void UnregisterDevice(RawId aId);
+  void FreeUnregisteredInParentDevice(RawId aId);
+
+  void QueueSubmit(RawId aSelfId, RawId aDeviceId,
+                   nsTArray<RawId>& aCommandBuffers);
+  void NotifyWaitForSubmit(RawId aTextureId);
+
+  static void JsWarning(nsIGlobalObject* aGlobal, const nsACString& aMessage);
+
+ private:
+  virtual ~WebGPUChild();
+
+  UniquePtr<ffi::WGPUClient> const mClient;
+  std::unordered_map<RawId, WeakPtr<Device>> mDeviceMap;
+  nsTArray<RawId> mSwapChainTexturesWaitingForSubmit;
+
+ public:
+  ipc::IPCResult RecvUncapturedError(Maybe<RawId> aDeviceId,
+                                     const nsACString& aMessage);
+  ipc::IPCResult RecvDropAction(const ipc::ByteBuf& aByteBuf);
+  ipc::IPCResult RecvDeviceLost(RawId aDeviceId, Maybe<uint8_t> aReason,
+                                const nsACString& aMessage);
+  void ActorDestroy(ActorDestroyReason) override;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // WEBGPU_CHILD_H_
diff --git a/dom/webgpu/ipc/WebGPUParent.cpp b/dom/webgpu/ipc/WebGPUParent.cpp
new file mode 100644
index 0000000000..9b79988245
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUParent.cpp
@@ -0,0 +1,1557 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0.
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "WebGPUParent.h" + +#include + +#include "mozilla/PodOperations.h" +#include "mozilla/ScopeExit.h" +#include "mozilla/dom/WebGPUBinding.h" +#include "mozilla/gfx/FileHandleWrapper.h" +#include "mozilla/layers/CompositorThread.h" +#include "mozilla/layers/ImageDataSerializer.h" +#include "mozilla/layers/RemoteTextureMap.h" +#include "mozilla/layers/TextureHost.h" +#include "mozilla/layers/WebRenderImageHost.h" +#include "mozilla/layers/WebRenderTextureHost.h" +#include "mozilla/webgpu/ExternalTexture.h" +#include "mozilla/webgpu/ffi/wgpu.h" + +#if defined(XP_WIN) +# include "mozilla/gfx/DeviceManagerDx.h" +#endif + +namespace mozilla::webgpu { + +const uint64_t POLL_TIME_MS = 100; + +static mozilla::LazyLogModule sLogger("WebGPU"); + +namespace ffi { + +extern bool wgpu_server_use_external_texture_for_swap_chain( + void* aParam, WGPUSwapChainId aSwapChainId) { + auto* parent = static_cast(aParam); + + return parent->UseExternalTextureForSwapChain(aSwapChainId); +} + +extern bool wgpu_server_ensure_external_texture_for_swap_chain( + void* aParam, WGPUSwapChainId aSwapChainId, WGPUDeviceId aDeviceId, + WGPUTextureId aTextureId, uint32_t aWidth, uint32_t aHeight, + struct WGPUTextureFormat aFormat, WGPUTextureUsages aUsage) { + auto* parent = static_cast(aParam); + + return parent->EnsureExternalTextureForSwapChain( + aSwapChainId, aDeviceId, aTextureId, aWidth, aHeight, aFormat, aUsage); +} + +extern void* wgpu_server_get_external_texture_handle(void* aParam, + WGPUTextureId aId) { + auto* parent = static_cast(aParam); + + auto texture = parent->GetExternalTexture(aId); + if (!texture) { + MOZ_ASSERT_UNREACHABLE("unexpected to be called"); + return nullptr; + } + + void* sharedHandle = nullptr; +#ifdef XP_WIN + sharedHandle = texture->GetExternalTextureHandle(); + if (!sharedHandle) { + MOZ_ASSERT_UNREACHABLE("unexpected to be called"); + gfxCriticalNoteOnce << "Failed to get shared handle"; + return nullptr; + } +#else + MOZ_ASSERT_UNREACHABLE("unexpected to be called"); +#endif + return sharedHandle; +} + +} // namespace ffi + +// A fixed-capacity buffer for receiving textual error messages from +// `wgpu_bindings`. +// +// The `ToFFI` method returns an `ffi::WGPUErrorBuffer` pointing to our +// buffer, for you to pass to fallible FFI-visible `wgpu_bindings` +// functions. These indicate failure by storing an error message in the +// buffer, which you can retrieve by calling `GetError`. +// +// If you call `ToFFI` on this type, you must also call `GetError` to check for +// an error. Otherwise, the destructor asserts. +// +// TODO: refactor this to avoid stack-allocating the buffer all the time. 
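+//
+// Illustrative usage sketch (not part of the upstream code); `SomeFallibleCall`
+// is a hypothetical stand-in for any fallible wgpu_server_* entry point:
+//
+//   ErrorBuffer error;
+//   ffi::SomeFallibleCall(mContext.get(), error.ToFFI());
+//   ForwardError(deviceId, error);  // consumes the error via GetError()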
+class ErrorBuffer {
+  // if the message doesn't fit, it will be truncated
+  static constexpr unsigned BUFFER_SIZE = 512;
+  ffi::WGPUErrorBufferType mType = ffi::WGPUErrorBufferType_None;
+  char mMessageUtf8[BUFFER_SIZE] = {};
+  bool mAwaitingGetError = false;
+
+ public:
+  ErrorBuffer() { mMessageUtf8[0] = 0; }
+  ErrorBuffer(const ErrorBuffer&) = delete;
+  ~ErrorBuffer() { MOZ_ASSERT(!mAwaitingGetError); }
+
+  ffi::WGPUErrorBuffer ToFFI() {
+    mAwaitingGetError = true;
+    ffi::WGPUErrorBuffer errorBuf = {&mType, mMessageUtf8, BUFFER_SIZE};
+    return errorBuf;
+  }
+
+  ffi::WGPUErrorBufferType GetType() { return mType; }
+
+  static Maybe<dom::GPUErrorFilter> ErrorTypeToFilterType(
+      ffi::WGPUErrorBufferType aType) {
+    switch (aType) {
+      case ffi::WGPUErrorBufferType_None:
+      case ffi::WGPUErrorBufferType_DeviceLost:
+        return {};
+      case ffi::WGPUErrorBufferType_Internal:
+        return Some(dom::GPUErrorFilter::Internal);
+      case ffi::WGPUErrorBufferType_Validation:
+        return Some(dom::GPUErrorFilter::Validation);
+      case ffi::WGPUErrorBufferType_OutOfMemory:
+        return Some(dom::GPUErrorFilter::Out_of_memory);
+      case ffi::WGPUErrorBufferType_Sentinel:
+        break;
+    }
+
+    MOZ_CRASH("invalid `ErrorBufferType`");
+  }
+
+  struct Error {
+    dom::GPUErrorFilter type;
+    bool isDeviceLost;
+    nsCString message;
+  };
+
+  // Retrieve the error message that was stored in this buffer, if any.
+  // Returns Nothing() if no error was recorded (viz., `GetType()` is
+  // `ffi::WGPUErrorBufferType_None`).
+  //
+  // Mark this `ErrorBuffer` as having been handled, so its destructor
+  // won't assert.
+  Maybe<Error> GetError() {
+    mAwaitingGetError = false;
+    if (mType == ffi::WGPUErrorBufferType_DeviceLost) {
+      // This error is for a lost device, so we return an Error struct
+      // with the isDeviceLost bool set to true. It doesn't matter what
+      // GPUErrorFilter type we use, so we just use Validation. The error
+      // will not be reported.
+      return Some(Error{dom::GPUErrorFilter::Validation, true,
+                        nsCString{mMessageUtf8}});
+    }
+    auto filterType = ErrorTypeToFilterType(mType);
+    if (!filterType) {
+      return {};
+    }
+    return Some(Error{*filterType, false, nsCString{mMessageUtf8}});
+  }
+};
+
+struct PendingSwapChainDrop {
+  layers::RemoteTextureTxnType mTxnType;
+  layers::RemoteTextureTxnId mTxnId;
+};
+
+class PresentationData {
+  NS_INLINE_DECL_REFCOUNTING(PresentationData);
+
+ public:
+  WeakPtr<WebGPUParent> mParent;
+  const bool mUseExternalTextureInSwapChain;
+  const RawId mDeviceId;
+  const RawId mQueueId;
+  const layers::RGBDescriptor mDesc;
+
+  uint64_t mSubmissionIndex = 0;
+
+  std::deque<std::shared_ptr<ExternalTexture>> mRecycledExternalTextures;
+
+  std::unordered_set<layers::RemoteTextureId, layers::RemoteTextureId::HashFn>
+      mWaitingReadbackTexturesForPresent;
+  Maybe<PendingSwapChainDrop> mPendingSwapChainDrop;
+
+  const uint32_t mSourcePitch;
+  std::vector<RawId> mUnassignedBufferIds MOZ_GUARDED_BY(mBuffersLock);
+  std::vector<RawId> mAvailableBufferIds MOZ_GUARDED_BY(mBuffersLock);
+  std::vector<RawId> mQueuedBufferIds MOZ_GUARDED_BY(mBuffersLock);
+  Mutex mBuffersLock;
+
+  PresentationData(WebGPUParent* aParent, bool aUseExternalTextureInSwapChain,
+                   RawId aDeviceId, RawId aQueueId,
+                   const layers::RGBDescriptor& aDesc, uint32_t aSourcePitch,
+                   const nsTArray<RawId>& aBufferIds)
+      : mParent(aParent),
+        mUseExternalTextureInSwapChain(aUseExternalTextureInSwapChain),
+        mDeviceId(aDeviceId),
+        mQueueId(aQueueId),
+        mDesc(aDesc),
+        mSourcePitch(aSourcePitch),
+        mBuffersLock("WebGPU presentation buffers") {
+    MOZ_COUNT_CTOR(PresentationData);
+
+    for (const RawId id : aBufferIds) {
+      mUnassignedBufferIds.push_back(id);
+    }
+  }
+
+ private:
+  ~PresentationData() { MOZ_COUNT_DTOR(PresentationData); }
+};
+
+WebGPUParent::WebGPUParent() : mContext(ffi::wgpu_server_new(this)) {
+  mTimer.Start(base::TimeDelta::FromMilliseconds(POLL_TIME_MS), this,
+               &WebGPUParent::MaintainDevices);
+}
+
+WebGPUParent::~WebGPUParent() {
+  // All devices should have been dropped, but maybe they weren't. To
+  // ensure we don't leak memory, clear the mDeviceLostRequests.
+  mDeviceLostRequests.clear();
+}
+
+void WebGPUParent::MaintainDevices() {
+  ffi::wgpu_server_poll_all_devices(mContext.get(), false);
+}
+
+void WebGPUParent::LoseDevice(const RawId aDeviceId, Maybe<uint8_t> aReason,
+                              const nsACString& aMessage) {
+  // Check to see if we've already sent a DeviceLost message to aDeviceId.
+  if (mLostDeviceIds.Contains(aDeviceId)) {
+    return;
+  }
+
+  // If the connection has been dropped, there is nobody to receive
+  // the DeviceLost message anyway.
+  if (CanSend()) {
+    if (!SendDeviceLost(aDeviceId, aReason, aMessage)) {
+      NS_ERROR("SendDeviceLost failed");
+      return;
+    }
+  }
+
+  mLostDeviceIds.Insert(aDeviceId);
+}
+
+bool WebGPUParent::ForwardError(const Maybe<RawId> aDeviceId,
+                                ErrorBuffer& aError) {
+  if (auto error = aError.GetError()) {
+    // If this error has isDeviceLost set to true, then instead of reporting
+    // the error, we swallow it and call LoseDevice if we have an
+    // aDeviceId. This is to comply with the spec declaration in
+    // https://gpuweb.github.io/gpuweb/#lose-the-device
+    // "No errors are generated after device loss."
+    if (error->isDeviceLost) {
+      if (aDeviceId.isSome()) {
+        LoseDevice(*aDeviceId, Nothing(), error->message);
+      }
+      return false;
+    }
+    ReportError(aDeviceId, error->type, error->message);
+    return true;
+  }
+  return false;
+}
+
+// Generate an error on the Device timeline of aDeviceId.
+// aMessage is interpreted as UTF-8.
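+//
+// The error is recorded by the innermost error scope (pushed via
+// RecvDevicePushErrorScope) whose filter matches aType; if no scope matches,
+// the error is forwarded to the content process as an UncapturedError message.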
+void WebGPUParent::ReportError(const Maybe<RawId> aDeviceId,
+                               const GPUErrorFilter aType,
+                               const nsCString& aMessage) {
+  // find the appropriate error scope
+  if (aDeviceId) {
+    const auto& itr = mErrorScopeStackByDevice.find(*aDeviceId);
+    if (itr != mErrorScopeStackByDevice.end()) {
+      auto& stack = itr->second;
+      for (auto& scope : Reversed(stack)) {
+        if (scope.filter != aType) {
+          continue;
+        }
+        if (!scope.firstMessage) {
+          scope.firstMessage = Some(aMessage);
+        }
+        return;
+      }
+    }
+  }
+  // No error scope found, so fall back to the uncaptured error handler
+  if (!SendUncapturedError(aDeviceId, aMessage)) {
+    NS_ERROR("SendUncapturedError failed");
+  }
+}
+
+ipc::IPCResult WebGPUParent::RecvInstanceRequestAdapter(
+    const dom::GPURequestAdapterOptions& aOptions,
+    const nsTArray<RawId>& aTargetIds,
+    InstanceRequestAdapterResolver&& resolver) {
+  ffi::WGPURequestAdapterOptions options = {};
+  if (aOptions.mPowerPreference.WasPassed()) {
+    options.power_preference = static_cast<ffi::WGPUPowerPreference>(
+        aOptions.mPowerPreference.Value());
+  } else {
+    options.power_preference = ffi::WGPUPowerPreference_LowPower;
+  }
+  options.force_fallback_adapter = aOptions.mForceFallbackAdapter;
+
+  auto luid = GetCompositorDeviceLuid();
+
+  ErrorBuffer error;
+  int8_t index = ffi::wgpu_server_instance_request_adapter(
+      mContext.get(), &options, aTargetIds.Elements(), aTargetIds.Length(),
+      luid.ptrOr(nullptr), error.ToFFI());
+
+  ByteBuf infoByteBuf;
+  // Rust side expects an `Option`, so 0 maps to `None`.
+  uint64_t adapterId = 0;
+  if (index >= 0) {
+    adapterId = aTargetIds[index];
+  }
+  ffi::wgpu_server_adapter_pack_info(mContext.get(), adapterId,
+                                     ToFFI(&infoByteBuf));
+  resolver(std::move(infoByteBuf));
+  ForwardError(0, error);
+
+  // free the unused IDs
+  ipc::ByteBuf dropByteBuf;
+  for (size_t i = 0; i < aTargetIds.Length(); ++i) {
+    if (static_cast<int8_t>(i) != index) {
+      wgpu_server_adapter_free(aTargetIds[i], ToFFI(&dropByteBuf));
+    }
+  }
+  if (dropByteBuf.mData && !SendDropAction(std::move(dropByteBuf))) {
+    NS_ERROR("Unable to free unused adapter IDs");
+  }
+  return IPC_OK();
+}
+
+/* static */ void WebGPUParent::DeviceLostCallback(uint8_t* aUserData,
+                                                   uint8_t aReason,
+                                                   const char* aMessage) {
+  DeviceLostRequest* req = reinterpret_cast<DeviceLostRequest*>(aUserData);
+  if (!req->mParent) {
+    // Parent is dead, never mind.
+    return;
+  }
+
+  RawId deviceId = req->mDeviceId;
+
+  // If aReason is 0, that corresponds to the "unknown" reason, which
+  // we treat as a Nothing() value. Any other value (which is positive)
+  // is mapped to the GPUDeviceLostReason values by subtracting 1.
+  Maybe<uint8_t> reason;
+  if (aReason > 0) {
+    uint8_t mappedReasonValue = (aReason - 1u);
+    reason = Some(mappedReasonValue);
+  }
+  nsAutoCString message(aMessage);
+  req->mParent->LoseDevice(deviceId, reason, message);
+
+  // We're no longer tracking the memory for this callback, so erase
+  // it to ensure we don't leak memory.
+ req->mParent->mDeviceLostRequests.erase(deviceId); +} + +ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice( + RawId aAdapterId, const ipc::ByteBuf& aByteBuf, RawId aDeviceId, + AdapterRequestDeviceResolver&& resolver) { + ErrorBuffer error; + ffi::wgpu_server_adapter_request_device( + mContext.get(), aAdapterId, ToFFI(&aByteBuf), aDeviceId, error.ToFFI()); + if (ForwardError(0, error)) { + uint8_t reasonDestroyed = 0; // GPUDeviceLostReason::Destroyed + auto maybeError = error.GetError(); + MOZ_ASSERT(maybeError.isSome()); + LoseDevice(aDeviceId, Some(reasonDestroyed), maybeError->message); + resolver(false); + return IPC_OK(); + } + + mErrorScopeStackByDevice.insert({aDeviceId, {}}); + + // Setup the device lost callback. + std::unique_ptr req( + new DeviceLostRequest{this, aDeviceId}); + auto iter = mDeviceLostRequests.insert({aDeviceId, std::move(req)}); + MOZ_ASSERT(iter.second, "Should be able to insert DeviceLostRequest."); + auto record = iter.first; + DeviceLostRequest* req_shadow = (record->second).get(); + ffi::WGPUDeviceLostClosureC callback = { + &DeviceLostCallback, reinterpret_cast(req_shadow)}; + ffi::wgpu_server_set_device_lost_callback(mContext.get(), aDeviceId, + callback); + + resolver(true); + +#if defined(XP_WIN) + HANDLE handle = + wgpu_server_get_device_fence_handle(mContext.get(), aDeviceId); + if (handle) { + mFenceHandle = new gfx::FileHandleWrapper(UniqueFileHandle(handle)); + } +#endif + + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvAdapterDrop(RawId aAdapterId) { + ffi::wgpu_server_adapter_drop(mContext.get(), aAdapterId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvDeviceDestroy(RawId aDeviceId) { + ffi::wgpu_server_device_destroy(mContext.get(), aDeviceId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvDeviceDrop(RawId aDeviceId) { + ffi::wgpu_server_device_drop(mContext.get(), aDeviceId); + MOZ_ASSERT(mDeviceLostRequests.find(aDeviceId) == mDeviceLostRequests.end(), + "DeviceLostRequest should have been invoked, then erased."); + + mErrorScopeStackByDevice.erase(aDeviceId); + mLostDeviceIds.Remove(aDeviceId); + return IPC_OK(); +} + +WebGPUParent::BufferMapData* WebGPUParent::GetBufferMapData(RawId aBufferId) { + const auto iter = mSharedMemoryMap.find(aBufferId); + if (iter == mSharedMemoryMap.end()) { + return nullptr; + } + + return &iter->second; +} + +ipc::IPCResult WebGPUParent::RecvDeviceCreateBuffer( + RawId aDeviceId, RawId aBufferId, dom::GPUBufferDescriptor&& aDesc, + ipc::UnsafeSharedMemoryHandle&& aShmem) { + webgpu::StringHelper label(aDesc.mLabel); + + auto shmem = + ipc::WritableSharedMemoryMapping::Open(std::move(aShmem)).value(); + + bool hasMapFlags = aDesc.mUsage & (dom::GPUBufferUsage_Binding::MAP_WRITE | + dom::GPUBufferUsage_Binding::MAP_READ); + bool shmAllocationFailed = false; + if (hasMapFlags || aDesc.mMappedAtCreation) { + if (shmem.Size() < aDesc.mSize) { + MOZ_RELEASE_ASSERT(shmem.Size() == 0); + // If we requested a non-zero mappable buffer and get a size of zero, it + // indicates that the shmem allocation failed on the client side. 
+ shmAllocationFailed = true; + } else { + uint64_t offset = 0; + uint64_t size = 0; + + if (aDesc.mMappedAtCreation) { + size = aDesc.mSize; + } + + BufferMapData data = {std::move(shmem), hasMapFlags, offset, size, + aDeviceId}; + mSharedMemoryMap.insert({aBufferId, std::move(data)}); + } + } + + ErrorBuffer error; + ffi::wgpu_server_device_create_buffer(mContext.get(), aDeviceId, aBufferId, + label.Get(), aDesc.mSize, aDesc.mUsage, + aDesc.mMappedAtCreation, + shmAllocationFailed, error.ToFFI()); + ForwardError(aDeviceId, error); + return IPC_OK(); +} + +struct MapRequest { + RefPtr mParent; + ffi::WGPUGlobal* mContext; + ffi::WGPUBufferId mBufferId; + ffi::WGPUHostMap mHostMap; + uint64_t mOffset; + uint64_t mSize; + WebGPUParent::BufferMapResolver mResolver; +}; + +static const char* MapStatusString(ffi::WGPUBufferMapAsyncStatus status) { + switch (status) { + case ffi::WGPUBufferMapAsyncStatus_Success: + return "Success"; + case ffi::WGPUBufferMapAsyncStatus_AlreadyMapped: + return "Already mapped"; + case ffi::WGPUBufferMapAsyncStatus_MapAlreadyPending: + return "Map is already pending"; + case ffi::WGPUBufferMapAsyncStatus_Aborted: + return "Map aborted"; + case ffi::WGPUBufferMapAsyncStatus_ContextLost: + return "Context lost"; + case ffi::WGPUBufferMapAsyncStatus_Invalid: + return "Invalid buffer"; + case ffi::WGPUBufferMapAsyncStatus_InvalidRange: + return "Invalid range"; + case ffi::WGPUBufferMapAsyncStatus_InvalidAlignment: + return "Invalid alignment"; + case ffi::WGPUBufferMapAsyncStatus_InvalidUsageFlags: + return "Invalid usage flags"; + case ffi::WGPUBufferMapAsyncStatus_Error: + return "Map failed"; + case ffi::WGPUBufferMapAsyncStatus_Sentinel: // For -Wswitch + break; + } + + MOZ_CRASH("Bad ffi::WGPUBufferMapAsyncStatus"); +} + +void WebGPUParent::MapCallback(ffi::WGPUBufferMapAsyncStatus aStatus, + uint8_t* aUserData) { + auto* req = reinterpret_cast(aUserData); + + if (!req->mParent->CanSend()) { + delete req; + return; + } + + BufferMapResult result; + + auto bufferId = req->mBufferId; + auto* mapData = req->mParent->GetBufferMapData(bufferId); + MOZ_RELEASE_ASSERT(mapData); + + if (aStatus != ffi::WGPUBufferMapAsyncStatus_Success) { + // A buffer map operation that fails with a DeviceError gets + // mapped to the ContextLost status. If we have this status, we + // need to lose the device. 
+ if (aStatus == ffi::WGPUBufferMapAsyncStatus_ContextLost) { + req->mParent->LoseDevice( + mapData->mDeviceId, Nothing(), + nsPrintfCString("Buffer %" PRIu64 " invalid", bufferId)); + } + + result = BufferMapError(nsPrintfCString("Mapping WebGPU buffer failed: %s", + MapStatusString(aStatus))); + } else { + auto size = req->mSize; + auto offset = req->mOffset; + + if (req->mHostMap == ffi::WGPUHostMap_Read && size > 0) { + ErrorBuffer error; + const auto src = ffi::wgpu_server_buffer_get_mapped_range( + req->mContext, req->mBufferId, offset, size, error.ToFFI()); + + MOZ_RELEASE_ASSERT(!error.GetError()); + + MOZ_RELEASE_ASSERT(mapData->mShmem.Size() >= offset + size); + if (src.ptr != nullptr && src.length >= size) { + auto dst = mapData->mShmem.Bytes().Subspan(offset, size); + memcpy(dst.data(), src.ptr, size); + } + } + + result = + BufferMapSuccess(offset, size, req->mHostMap == ffi::WGPUHostMap_Write); + + mapData->mMappedOffset = offset; + mapData->mMappedSize = size; + } + + req->mResolver(std::move(result)); + delete req; +} + +ipc::IPCResult WebGPUParent::RecvBufferMap(RawId aDeviceId, RawId aBufferId, + uint32_t aMode, uint64_t aOffset, + uint64_t aSize, + BufferMapResolver&& aResolver) { + MOZ_LOG(sLogger, LogLevel::Info, + ("RecvBufferMap %" PRIu64 " offset=%" PRIu64 " size=%" PRIu64 "\n", + aBufferId, aOffset, aSize)); + + ffi::WGPUHostMap mode; + switch (aMode) { + case dom::GPUMapMode_Binding::READ: + mode = ffi::WGPUHostMap_Read; + break; + case dom::GPUMapMode_Binding::WRITE: + mode = ffi::WGPUHostMap_Write; + break; + default: { + nsCString errorString( + "GPUBuffer.mapAsync 'mode' argument must be either GPUMapMode.READ " + "or GPUMapMode.WRITE"); + aResolver(BufferMapError(errorString)); + return IPC_OK(); + } + } + + auto* mapData = GetBufferMapData(aBufferId); + + if (!mapData) { + nsCString errorString("Buffer is not mappable"); + aResolver(BufferMapError(errorString)); + return IPC_OK(); + } + + auto* request = + new MapRequest{this, mContext.get(), aBufferId, mode, + aOffset, aSize, std::move(aResolver)}; + + ffi::WGPUBufferMapCallbackC callback = {&MapCallback, + reinterpret_cast(request)}; + ErrorBuffer mapError; + ffi::wgpu_server_buffer_map(mContext.get(), aBufferId, aOffset, aSize, mode, + callback, mapError.ToFFI()); + ForwardError(aDeviceId, mapError); + + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvBufferUnmap(RawId aDeviceId, RawId aBufferId, + bool aFlush) { + MOZ_LOG(sLogger, LogLevel::Info, + ("RecvBufferUnmap %" PRIu64 " flush=%d\n", aBufferId, aFlush)); + + auto* mapData = GetBufferMapData(aBufferId); + + if (mapData && aFlush) { + uint64_t offset = mapData->mMappedOffset; + uint64_t size = mapData->mMappedSize; + + ErrorBuffer getRangeError; + const auto mapped = ffi::wgpu_server_buffer_get_mapped_range( + mContext.get(), aBufferId, offset, size, getRangeError.ToFFI()); + ForwardError(aDeviceId, getRangeError); + + if (mapped.ptr != nullptr && mapped.length >= size) { + auto shmSize = mapData->mShmem.Size(); + MOZ_RELEASE_ASSERT(offset <= shmSize); + MOZ_RELEASE_ASSERT(size <= shmSize - offset); + + auto src = mapData->mShmem.Bytes().Subspan(offset, size); + memcpy(mapped.ptr, src.data(), size); + } + + mapData->mMappedOffset = 0; + mapData->mMappedSize = 0; + } + + ErrorBuffer unmapError; + ffi::wgpu_server_buffer_unmap(mContext.get(), aBufferId, unmapError.ToFFI()); + ForwardError(aDeviceId, unmapError); + + if (mapData && !mapData->mHasMapFlags) { + // We get here if the buffer was mapped at creation without map flags. 
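+    // (Without MAP_READ or MAP_WRITE it can never be mapped again.)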
+ // We don't need the shared memory anymore. + DeallocBufferShmem(aBufferId); + } + + return IPC_OK(); +} + +void WebGPUParent::DeallocBufferShmem(RawId aBufferId) { + const auto iter = mSharedMemoryMap.find(aBufferId); + if (iter != mSharedMemoryMap.end()) { + mSharedMemoryMap.erase(iter); + } +} + +ipc::IPCResult WebGPUParent::RecvBufferDrop(RawId aBufferId) { + ffi::wgpu_server_buffer_drop(mContext.get(), aBufferId); + MOZ_LOG(sLogger, LogLevel::Info, ("RecvBufferDrop %" PRIu64 "\n", aBufferId)); + + DeallocBufferShmem(aBufferId); + + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aBufferId) { + ffi::wgpu_server_buffer_destroy(mContext.get(), aBufferId); + MOZ_LOG(sLogger, LogLevel::Info, + ("RecvBufferDestroy %" PRIu64 "\n", aBufferId)); + + DeallocBufferShmem(aBufferId); + + return IPC_OK(); +} + +void WebGPUParent::RemoveExternalTexture(RawId aTextureId) { + auto it = mExternalTextures.find(aTextureId); + if (it != mExternalTextures.end()) { + mExternalTextures.erase(it); + } +} + +ipc::IPCResult WebGPUParent::RecvTextureDestroy(RawId aTextureId, + RawId aDeviceId) { + ffi::wgpu_server_texture_destroy(mContext.get(), aTextureId); + RemoveExternalTexture(aTextureId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvTextureDrop(RawId aTextureId) { + ffi::wgpu_server_texture_drop(mContext.get(), aTextureId); + RemoveExternalTexture(aTextureId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvTextureViewDrop(RawId aTextureViewId) { + ffi::wgpu_server_texture_view_drop(mContext.get(), aTextureViewId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvSamplerDrop(RawId aSamplerId) { + ffi::wgpu_server_sampler_drop(mContext.get(), aSamplerId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvCommandEncoderFinish( + RawId aEncoderId, RawId aDeviceId, + const dom::GPUCommandBufferDescriptor& aDesc) { + Unused << aDesc; + ffi::WGPUCommandBufferDescriptor desc = {}; + + webgpu::StringHelper label(aDesc.mLabel); + desc.label = label.Get(); + + ErrorBuffer error; + ffi::wgpu_server_encoder_finish(mContext.get(), aEncoderId, &desc, + error.ToFFI()); + + ForwardError(aDeviceId, error); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvCommandEncoderDrop(RawId aEncoderId) { + ffi::wgpu_server_encoder_drop(mContext.get(), aEncoderId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvRenderBundleDrop(RawId aBundleId) { + ffi::wgpu_server_render_bundle_drop(mContext.get(), aBundleId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvQueueSubmit( + RawId aQueueId, RawId aDeviceId, const nsTArray& aCommandBuffers, + const nsTArray& aTextureIds) { + ErrorBuffer error; + auto index = ffi::wgpu_server_queue_submit( + mContext.get(), aQueueId, aCommandBuffers.Elements(), + aCommandBuffers.Length(), error.ToFFI()); + // Check if index is valid. 0 means error. 
+ if (index != 0) { + for (const auto& textureId : aTextureIds) { + auto it = mExternalTextures.find(textureId); + if (it != mExternalTextures.end()) { + auto& externalTexture = it->second; + + externalTexture->SetSubmissionIndex(index); + } + } + } + ForwardError(aDeviceId, error); + return IPC_OK(); +} + +struct OnSubmittedWorkDoneRequest { + RefPtr mParent; + WebGPUParent::QueueOnSubmittedWorkDoneResolver mResolver; +}; + +void OnSubmittedWorkDoneCallback(uint8_t* userdata) { + auto req = std::unique_ptr( + reinterpret_cast(userdata)); + if (req->mParent->CanSend()) { + req->mResolver(void_t()); + } +} + +ipc::IPCResult WebGPUParent::RecvQueueOnSubmittedWorkDone( + RawId aQueueId, std::function&& aResolver) { + std::unique_ptr request( + new OnSubmittedWorkDoneRequest{this, std::move(aResolver)}); + + ffi::WGPUSubmittedWorkDoneClosureC callback = { + &OnSubmittedWorkDoneCallback, + reinterpret_cast(request.release())}; + ffi::wgpu_server_on_submitted_work_done(mContext.get(), aQueueId, callback); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvQueueWriteAction( + RawId aQueueId, RawId aDeviceId, const ipc::ByteBuf& aByteBuf, + ipc::UnsafeSharedMemoryHandle&& aShmem) { + auto mapping = + ipc::WritableSharedMemoryMapping::Open(std::move(aShmem)).value(); + + ErrorBuffer error; + ffi::wgpu_server_queue_write_action(mContext.get(), aQueueId, + ToFFI(&aByteBuf), mapping.Bytes().data(), + mapping.Size(), error.ToFFI()); + ForwardError(aDeviceId, error); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvBindGroupLayoutDrop(RawId aBindGroupId) { + ffi::wgpu_server_bind_group_layout_drop(mContext.get(), aBindGroupId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvPipelineLayoutDrop(RawId aLayoutId) { + ffi::wgpu_server_pipeline_layout_drop(mContext.get(), aLayoutId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvBindGroupDrop(RawId aBindGroupId) { + ffi::wgpu_server_bind_group_drop(mContext.get(), aBindGroupId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvShaderModuleDrop(RawId aModuleId) { + ffi::wgpu_server_shader_module_drop(mContext.get(), aModuleId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvComputePipelineDrop(RawId aPipelineId) { + ffi::wgpu_server_compute_pipeline_drop(mContext.get(), aPipelineId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvRenderPipelineDrop(RawId aPipelineId) { + ffi::wgpu_server_render_pipeline_drop(mContext.get(), aPipelineId); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvImplicitLayoutDrop( + RawId aImplicitPlId, const nsTArray& aImplicitBglIds) { + ffi::wgpu_server_pipeline_layout_drop(mContext.get(), aImplicitPlId); + for (const auto& id : aImplicitBglIds) { + ffi::wgpu_server_bind_group_layout_drop(mContext.get(), id); + } + return IPC_OK(); +} + +// TODO: proper destruction + +ipc::IPCResult WebGPUParent::RecvDeviceCreateSwapChain( + RawId aDeviceId, RawId aQueueId, const RGBDescriptor& aDesc, + const nsTArray& aBufferIds, + const layers::RemoteTextureOwnerId& aOwnerId, + bool aUseExternalTextureInSwapChain) { + switch (aDesc.format()) { + case gfx::SurfaceFormat::R8G8B8A8: + case gfx::SurfaceFormat::B8G8R8A8: + break; + default: + MOZ_ASSERT_UNREACHABLE("Invalid surface format!"); + return IPC_OK(); + } + + const auto bufferStrideWithMask = + Device::BufferStrideWithMask(aDesc.size(), aDesc.format()); + if (!bufferStrideWithMask.isValid()) { + MOZ_ASSERT_UNREACHABLE("Invalid width / buffer stride!"); + return IPC_OK(); + } + + constexpr uint32_t 
kBufferAlignmentMask = 0xff; + const uint32_t bufferStride = + bufferStrideWithMask.value() & ~kBufferAlignmentMask; + + const auto rows = CheckedInt(aDesc.size().height); + if (!rows.isValid()) { + MOZ_ASSERT_UNREACHABLE("Invalid height!"); + return IPC_OK(); + } + + if (!mRemoteTextureOwner) { + mRemoteTextureOwner = + MakeRefPtr(OtherPid()); + } + mRemoteTextureOwner->RegisterTextureOwner(aOwnerId); + + auto data = MakeRefPtr(this, aUseExternalTextureInSwapChain, + aDeviceId, aQueueId, aDesc, + bufferStride, aBufferIds); + if (!mPresentationDataMap.emplace(aOwnerId, data).second) { + NS_ERROR("External image is already registered as WebGPU canvas!"); + } + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvDeviceCreateShaderModule( + RawId aDeviceId, RawId aModuleId, const nsString& aLabel, + const nsCString& aCode, DeviceCreateShaderModuleResolver&& aOutMessage) { + // TODO: this should probably be an optional label in the IPC message. + const nsACString* label = nullptr; + NS_ConvertUTF16toUTF8 utf8Label(aLabel); + if (!utf8Label.IsEmpty()) { + label = &utf8Label; + } + + ffi::WGPUShaderModuleCompilationMessage message; + ErrorBuffer error; + + bool ok = ffi::wgpu_server_device_create_shader_module( + mContext.get(), aDeviceId, aModuleId, label, &aCode, &message, + error.ToFFI()); + + ForwardError(aDeviceId, error); + + nsTArray messages; + + if (!ok) { + WebGPUCompilationMessage msg; + msg.lineNum = message.line_number; + msg.linePos = message.line_pos; + msg.offset = message.utf16_offset; + msg.length = message.utf16_length; + msg.message = message.message; + // wgpu currently only returns errors. + msg.messageType = WebGPUCompilationMessageType::Error; + + messages.AppendElement(msg); + } + + aOutMessage(messages); + + return IPC_OK(); +} + +struct ReadbackPresentRequest { + ReadbackPresentRequest( + const ffi::WGPUGlobal* aContext, RefPtr& aData, + RefPtr& aRemoteTextureOwner, + const layers::RemoteTextureId aTextureId, + const layers::RemoteTextureOwnerId aOwnerId) + : mContext(aContext), + mData(aData), + mRemoteTextureOwner(aRemoteTextureOwner), + mTextureId(aTextureId), + mOwnerId(aOwnerId) {} + + const ffi::WGPUGlobal* mContext; + RefPtr mData; + RefPtr mRemoteTextureOwner; + const layers::RemoteTextureId mTextureId; + const layers::RemoteTextureOwnerId mOwnerId; +}; + +static void ReadbackPresentCallback(ffi::WGPUBufferMapAsyncStatus status, + uint8_t* userdata) { + UniquePtr req( + reinterpret_cast(userdata)); + + const auto onExit = mozilla::MakeScopeExit([&]() { + auto& waitingTextures = req->mData->mWaitingReadbackTexturesForPresent; + auto it = waitingTextures.find(req->mTextureId); + MOZ_ASSERT(it != waitingTextures.end()); + if (it != waitingTextures.end()) { + waitingTextures.erase(it); + } + if (req->mData->mPendingSwapChainDrop.isSome() && waitingTextures.empty()) { + if (req->mData->mParent) { + auto& pendingDrop = req->mData->mPendingSwapChainDrop.ref(); + req->mData->mParent->RecvSwapChainDrop( + req->mOwnerId, pendingDrop.mTxnType, pendingDrop.mTxnId); + req->mData->mPendingSwapChainDrop = Nothing(); + } + } + }); + + if (!req->mRemoteTextureOwner->IsRegistered(req->mOwnerId)) { + // SwapChain is already Destroyed + return; + } + + PresentationData* data = req->mData.get(); + // get the buffer ID + RawId bufferId; + { + MutexAutoLock lock(data->mBuffersLock); + bufferId = data->mQueuedBufferIds.back(); + data->mQueuedBufferIds.pop_back(); + } + + // Ensure we'll make the bufferId available for reuse + auto releaseBuffer = MakeScopeExit([data = 
RefPtr{data}, bufferId] {
+    MutexAutoLock lock(data->mBuffersLock);
+    data->mAvailableBufferIds.push_back(bufferId);
+  });
+
+  MOZ_LOG(sLogger, LogLevel::Info,
+          ("ReadbackPresentCallback for buffer %" PRIu64 " status=%d\n",
+           bufferId, status));
+  // copy the data
+  if (status == ffi::WGPUBufferMapAsyncStatus_Success) {
+    const auto bufferSize = data->mDesc.size().height * data->mSourcePitch;
+    ErrorBuffer getRangeError;
+    const auto mapped = ffi::wgpu_server_buffer_get_mapped_range(
+        req->mContext, bufferId, 0, bufferSize, getRangeError.ToFFI());
+    if (req->mData->mParent) {
+      req->mData->mParent->ForwardError(data->mDeviceId, getRangeError);
+    } else if (auto innerError = getRangeError.GetError()) {
+      // If an error occurred in get_mapped_range, treat it as an internal error
+      // and crash. The error handling story for something unexpected happening
+      // during the present glue needs to be figured out in a more global way.
+      MOZ_LOG(sLogger, LogLevel::Info,
+              ("WebGPU present: buffer get_mapped_range failed: %s\n",
+               innerError->message.get()));
+    }
+
+    MOZ_RELEASE_ASSERT(mapped.length >= bufferSize);
+    auto textureData =
+        req->mRemoteTextureOwner->CreateOrRecycleBufferTextureData(
+            data->mDesc.size(), data->mDesc.format(), req->mOwnerId);
+    if (!textureData) {
+      gfxCriticalNoteOnce << "Failed to allocate BufferTextureData";
+      return;
+    }
+    layers::MappedTextureData mappedData;
+    if (textureData && textureData->BorrowMappedData(mappedData)) {
+      uint8_t* src = mapped.ptr;
+      uint8_t* dst = mappedData.data;
+      for (auto row = 0; row < data->mDesc.size().height; ++row) {
+        memcpy(dst, src, mappedData.stride);
+        dst += mappedData.stride;
+        src += data->mSourcePitch;
+      }
+      req->mRemoteTextureOwner->PushTexture(req->mTextureId, req->mOwnerId,
+                                            std::move(textureData));
+    } else {
+      NS_WARNING("WebGPU present skipped: the swapchain is resized!");
+    }
+    ErrorBuffer unmapError;
+    wgpu_server_buffer_unmap(req->mContext, bufferId, unmapError.ToFFI());
+    if (req->mData->mParent) {
+      req->mData->mParent->ForwardError(data->mDeviceId, unmapError);
+    } else if (auto innerError = unmapError.GetError()) {
+      MOZ_LOG(sLogger, LogLevel::Info,
+              ("WebGPU present: buffer unmap failed: %s\n",
+               innerError->message.get()));
+    }
+  } else {
+    // TODO: better handle errors
+    NS_WARNING("WebGPU frame mapping failed!");
+  }
+}
+
+ipc::IPCResult WebGPUParent::GetFrontBufferSnapshot(
+    IProtocol* aProtocol, const layers::RemoteTextureOwnerId& aOwnerId,
+    Maybe<Shmem>& aShmem, gfx::IntSize& aSize) {
+  const auto& lookup = mPresentationDataMap.find(aOwnerId);
+  if (lookup == mPresentationDataMap.end() || !mRemoteTextureOwner ||
+      !mRemoteTextureOwner->IsRegistered(aOwnerId)) {
+    return IPC_OK();
+  }
+
+  RefPtr<PresentationData> data = lookup->second.get();
+  aSize = data->mDesc.size();
+  uint32_t stride = layers::ImageDataSerializer::ComputeRGBStride(
+      data->mDesc.format(), aSize.width);
+  uint32_t len = data->mDesc.size().height * stride;
+  Shmem shmem;
+  if (!AllocShmem(len, &shmem)) {
+    return IPC_OK();
+  }
+
+  mRemoteTextureOwner->GetLatestBufferSnapshot(aOwnerId, shmem, aSize);
+  aShmem.emplace(std::move(shmem));
+
+  return IPC_OK();
+}
+
+void WebGPUParent::PostExternalTexture(
+    const std::shared_ptr<ExternalTexture>&& aExternalTexture,
+    const layers::RemoteTextureId aRemoteTextureId,
+    const layers::RemoteTextureOwnerId aOwnerId) {
+  const auto& lookup = mPresentationDataMap.find(aOwnerId);
+  if (lookup == mPresentationDataMap.end() || !mRemoteTextureOwner ||
+      !mRemoteTextureOwner->IsRegistered(aOwnerId)) {
+    NS_WARNING("WebGPU presenting on a destroyed swap chain!");
destroyed swap chain!"); + return; + } + + const auto surfaceFormat = gfx::SurfaceFormat::B8G8R8A8; + const auto size = aExternalTexture->GetSize(); + const auto index = aExternalTexture->GetSubmissionIndex(); + MOZ_ASSERT(index != 0); + + Maybe fenceInfo; + if (mFenceHandle) { + fenceInfo = Some(gfx::FenceInfo(mFenceHandle, index)); + } + + Maybe desc = + aExternalTexture->ToSurfaceDescriptor(fenceInfo); + if (!desc) { + MOZ_ASSERT_UNREACHABLE("unexpected to be called"); + return; + } + + mRemoteTextureOwner->PushTexture(aRemoteTextureId, aOwnerId, aExternalTexture, + size, surfaceFormat, *desc); + + RefPtr data = lookup->second.get(); + + auto recycledTexture = mRemoteTextureOwner->GetRecycledExternalTexture( + size, surfaceFormat, desc->type(), aOwnerId); + if (recycledTexture) { + data->mRecycledExternalTextures.push_back(recycledTexture); + } +} + +ipc::IPCResult WebGPUParent::RecvSwapChainPresent( + RawId aTextureId, RawId aCommandEncoderId, + const layers::RemoteTextureId& aRemoteTextureId, + const layers::RemoteTextureOwnerId& aOwnerId) { + // step 0: get the data associated with the swapchain + const auto& lookup = mPresentationDataMap.find(aOwnerId); + if (lookup == mPresentationDataMap.end() || !mRemoteTextureOwner || + !mRemoteTextureOwner->IsRegistered(aOwnerId)) { + NS_WARNING("WebGPU presenting on a destroyed swap chain!"); + return IPC_OK(); + } + + RefPtr data = lookup->second.get(); + + if (data->mUseExternalTextureInSwapChain) { + auto it = mExternalTextures.find(aTextureId); + if (it == mExternalTextures.end()) { + MOZ_ASSERT_UNREACHABLE("unexpected to be called"); + return IPC_OK(); + } + std::shared_ptr externalTexture = it->second; + mExternalTextures.erase(it); + + PostExternalTexture(std::move(externalTexture), aRemoteTextureId, aOwnerId); + return IPC_OK(); + } + + RawId bufferId = 0; + const auto& size = data->mDesc.size(); + const auto bufferSize = data->mDesc.size().height * data->mSourcePitch; + + // step 1: find an available staging buffer, or create one + { + MutexAutoLock lock(data->mBuffersLock); + if (!data->mAvailableBufferIds.empty()) { + bufferId = data->mAvailableBufferIds.back(); + data->mAvailableBufferIds.pop_back(); + } else if (!data->mUnassignedBufferIds.empty()) { + bufferId = data->mUnassignedBufferIds.back(); + data->mUnassignedBufferIds.pop_back(); + + ffi::WGPUBufferUsages usage = + WGPUBufferUsages_COPY_DST | WGPUBufferUsages_MAP_READ; + + ErrorBuffer error; + ffi::wgpu_server_device_create_buffer(mContext.get(), data->mDeviceId, + bufferId, nullptr, bufferSize, + usage, false, false, error.ToFFI()); + if (ForwardError(data->mDeviceId, error)) { + return IPC_OK(); + } + } else { + bufferId = 0; + } + + if (bufferId) { + data->mQueuedBufferIds.insert(data->mQueuedBufferIds.begin(), bufferId); + } + } + + MOZ_LOG(sLogger, LogLevel::Info, + ("RecvSwapChainPresent with buffer %" PRIu64 "\n", bufferId)); + if (!bufferId) { + // TODO: add a warning - no buffer are available! 
+ return IPC_OK(); + } + + // step 3: submit a copy command for the frame + ffi::WGPUCommandEncoderDescriptor encoderDesc = {}; + { + ErrorBuffer error; + ffi::wgpu_server_device_create_encoder(mContext.get(), data->mDeviceId, + &encoderDesc, aCommandEncoderId, + error.ToFFI()); + if (ForwardError(data->mDeviceId, error)) { + return IPC_OK(); + } + } + + const ffi::WGPUImageCopyTexture texView = { + aTextureId, + }; + const ffi::WGPUImageDataLayout bufLayout = { + 0, + &data->mSourcePitch, + nullptr, + }; + const ffi::WGPUExtent3d extent = { + static_cast(size.width), + static_cast(size.height), + 1, + }; + + { + ErrorBuffer error; + ffi::wgpu_server_encoder_copy_texture_to_buffer( + mContext.get(), aCommandEncoderId, &texView, bufferId, &bufLayout, + &extent, error.ToFFI()); + if (ForwardError(data->mDeviceId, error)) { + return IPC_OK(); + } + } + ffi::WGPUCommandBufferDescriptor commandDesc = {}; + { + ErrorBuffer error; + ffi::wgpu_server_encoder_finish(mContext.get(), aCommandEncoderId, + &commandDesc, error.ToFFI()); + if (ForwardError(data->mDeviceId, error)) { + return IPC_OK(); + } + } + + { + ErrorBuffer error; + ffi::wgpu_server_queue_submit(mContext.get(), data->mQueueId, + &aCommandEncoderId, 1, error.ToFFI()); + if (ForwardError(data->mDeviceId, error)) { + return IPC_OK(); + } + } + + auto& waitingTextures = data->mWaitingReadbackTexturesForPresent; + auto it = waitingTextures.find(aRemoteTextureId); + MOZ_ASSERT(it == waitingTextures.end()); + if (it == waitingTextures.end()) { + waitingTextures.emplace(aRemoteTextureId); + } + + // step 4: request the pixels to be copied into the external texture + // TODO: this isn't strictly necessary. When WR wants to Lock() the external + // texture, + // we can just give it the contents of the last mapped buffer instead of the + // copy. 
+ auto presentRequest = MakeUnique( + mContext.get(), data, mRemoteTextureOwner, aRemoteTextureId, aOwnerId); + + ffi::WGPUBufferMapCallbackC callback = { + &ReadbackPresentCallback, + reinterpret_cast(presentRequest.release())}; + + ErrorBuffer error; + ffi::wgpu_server_buffer_map(mContext.get(), bufferId, 0, bufferSize, + ffi::WGPUHostMap_Read, callback, error.ToFFI()); + if (ForwardError(data->mDeviceId, error)) { + return IPC_OK(); + } + + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvSwapChainDrop( + const layers::RemoteTextureOwnerId& aOwnerId, + layers::RemoteTextureTxnType aTxnType, layers::RemoteTextureTxnId aTxnId) { + const auto& lookup = mPresentationDataMap.find(aOwnerId); + MOZ_ASSERT(lookup != mPresentationDataMap.end()); + if (lookup == mPresentationDataMap.end()) { + NS_WARNING("WebGPU presenting on a destroyed swap chain!"); + return IPC_OK(); + } + + RefPtr data = lookup->second.get(); + + auto waitingCount = data->mWaitingReadbackTexturesForPresent.size(); + if (waitingCount > 0) { + // Defer SwapChainDrop until readback complete + data->mPendingSwapChainDrop = Some(PendingSwapChainDrop{aTxnType, aTxnId}); + return IPC_OK(); + } + + if (mRemoteTextureOwner) { + if (aTxnType && aTxnId) { + mRemoteTextureOwner->WaitForTxn(aOwnerId, aTxnType, aTxnId); + } + mRemoteTextureOwner->UnregisterTextureOwner(aOwnerId); + } + + mPresentationDataMap.erase(lookup); + + MutexAutoLock lock(data->mBuffersLock); + ipc::ByteBuf dropByteBuf; + for (const auto bid : data->mUnassignedBufferIds) { + wgpu_server_buffer_free(bid, ToFFI(&dropByteBuf)); + } + if (dropByteBuf.mData && !SendDropAction(std::move(dropByteBuf))) { + NS_WARNING("Unable to free an ID for non-assigned buffer"); + } + for (const auto bid : data->mAvailableBufferIds) { + ffi::wgpu_server_buffer_drop(mContext.get(), bid); + } + for (const auto bid : data->mQueuedBufferIds) { + ffi::wgpu_server_buffer_drop(mContext.get(), bid); + } + return IPC_OK(); +} + +void WebGPUParent::ActorDestroy(ActorDestroyReason aWhy) { + mTimer.Stop(); + mPresentationDataMap.clear(); + if (mRemoteTextureOwner) { + mRemoteTextureOwner->UnregisterAllTextureOwners(); + mRemoteTextureOwner = nullptr; + } + ffi::wgpu_server_poll_all_devices(mContext.get(), true); + mContext = nullptr; +} + +ipc::IPCResult WebGPUParent::RecvDeviceAction(RawId aDeviceId, + const ipc::ByteBuf& aByteBuf) { + ErrorBuffer error; + ffi::wgpu_server_device_action(mContext.get(), aDeviceId, ToFFI(&aByteBuf), + error.ToFFI()); + + ForwardError(aDeviceId, error); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvDeviceActionWithAck( + RawId aDeviceId, const ipc::ByteBuf& aByteBuf, + DeviceActionWithAckResolver&& aResolver) { + auto result = RecvDeviceAction(aDeviceId, aByteBuf); + aResolver(true); + return result; +} + +ipc::IPCResult WebGPUParent::RecvTextureAction(RawId aTextureId, + RawId aDeviceId, + const ipc::ByteBuf& aByteBuf) { + ErrorBuffer error; + ffi::wgpu_server_texture_action(mContext.get(), aTextureId, ToFFI(&aByteBuf), + error.ToFFI()); + + ForwardError(aDeviceId, error); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvCommandEncoderAction( + RawId aEncoderId, RawId aDeviceId, const ipc::ByteBuf& aByteBuf) { + ErrorBuffer error; + ffi::wgpu_server_command_encoder_action(mContext.get(), aEncoderId, + ToFFI(&aByteBuf), error.ToFFI()); + ForwardError(aDeviceId, error); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvBumpImplicitBindGroupLayout(RawId aPipelineId, + bool aIsCompute, + uint32_t aIndex, + RawId aAssignId) { + 
ErrorBuffer error; + if (aIsCompute) { + ffi::wgpu_server_compute_pipeline_get_bind_group_layout( + mContext.get(), aPipelineId, aIndex, aAssignId, error.ToFFI()); + } else { + ffi::wgpu_server_render_pipeline_get_bind_group_layout( + mContext.get(), aPipelineId, aIndex, aAssignId, error.ToFFI()); + } + + ForwardError(0, error); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvDevicePushErrorScope( + RawId aDeviceId, const dom::GPUErrorFilter aFilter) { + const auto& itr = mErrorScopeStackByDevice.find(aDeviceId); + if (itr == mErrorScopeStackByDevice.end()) { + // Content can cause this simply by destroying a device and then + // calling `pushErrorScope`. + return IPC_OK(); + } + auto& stack = itr->second; + + // Let's prevent `while (true) { pushErrorScope(); }`. + constexpr size_t MAX_ERROR_SCOPE_STACK_SIZE = 1'000'000; + if (stack.size() >= MAX_ERROR_SCOPE_STACK_SIZE) { + nsPrintfCString m("pushErrorScope: Hit MAX_ERROR_SCOPE_STACK_SIZE of %zu", + MAX_ERROR_SCOPE_STACK_SIZE); + ReportError(Some(aDeviceId), dom::GPUErrorFilter::Out_of_memory, m); + return IPC_OK(); + } + + const auto newScope = ErrorScope{aFilter}; + stack.push_back(newScope); + return IPC_OK(); +} + +ipc::IPCResult WebGPUParent::RecvDevicePopErrorScope( + RawId aDeviceId, DevicePopErrorScopeResolver&& aResolver) { + const auto popResult = [&]() { + const auto& itr = mErrorScopeStackByDevice.find(aDeviceId); + if (itr == mErrorScopeStackByDevice.end()) { + // Content can cause this simply by destroying a device and then + // calling `popErrorScope`. + return PopErrorScopeResult{PopErrorScopeResultType::DeviceLost}; + } + + auto& stack = itr->second; + if (!stack.size()) { + // Content can cause this simply by calling `popErrorScope` when + // there is no error scope pushed. 
+      return PopErrorScopeResult{PopErrorScopeResultType::ThrowOperationError,
+                                 "popErrorScope on empty stack"_ns};
+    }
+
+    const auto& scope = stack.back();
+    const auto popLater = MakeScopeExit([&]() { stack.pop_back(); });
+
+    auto ret = PopErrorScopeResult{PopErrorScopeResultType::NoError};
+    if (scope.firstMessage) {
+      ret.message = *scope.firstMessage;
+      switch (scope.filter) {
+        case dom::GPUErrorFilter::Validation:
+          ret.resultType = PopErrorScopeResultType::ValidationError;
+          break;
+        case dom::GPUErrorFilter::Out_of_memory:
+          ret.resultType = PopErrorScopeResultType::OutOfMemory;
+          break;
+        case dom::GPUErrorFilter::Internal:
+          ret.resultType = PopErrorScopeResultType::InternalError;
+          break;
+        case dom::GPUErrorFilter::EndGuard_:
+          MOZ_CRASH("Bad GPUErrorFilter");
+      }
+    }
+    return ret;
+  }();
+  aResolver(popResult);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvGenerateError(const Maybe<RawId> aDeviceId,
+                                               const dom::GPUErrorFilter aType,
+                                               const nsCString& aMessage) {
+  ReportError(aDeviceId, aType, aMessage);
+  return IPC_OK();
+}
+
+bool WebGPUParent::UseExternalTextureForSwapChain(
+    ffi::WGPUSwapChainId aSwapChainId) {
+  auto ownerId = layers::RemoteTextureOwnerId{aSwapChainId._0};
+  const auto& lookup = mPresentationDataMap.find(ownerId);
+  if (lookup == mPresentationDataMap.end()) {
+    MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+    return false;
+  }
+
+  RefPtr<PresentationData> data = lookup->second.get();
+
+  return data->mUseExternalTextureInSwapChain;
+}
+
+bool WebGPUParent::EnsureExternalTextureForSwapChain(
+    ffi::WGPUSwapChainId aSwapChainId, ffi::WGPUDeviceId aDeviceId,
+    ffi::WGPUTextureId aTextureId, uint32_t aWidth, uint32_t aHeight,
+    struct ffi::WGPUTextureFormat aFormat, ffi::WGPUTextureUsages aUsage) {
+  auto ownerId = layers::RemoteTextureOwnerId{aSwapChainId._0};
+  const auto& lookup = mPresentationDataMap.find(ownerId);
+  if (lookup == mPresentationDataMap.end()) {
+    MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+    return false;
+  }
+
+  RefPtr<PresentationData> data = lookup->second.get();
+  if (!data->mUseExternalTextureInSwapChain) {
+    MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+    return false;
+  }
+
+  // Reuse a recycled ExternalTexture if one exists.
+  if (!data->mRecycledExternalTextures.empty()) {
+    std::shared_ptr<ExternalTexture> texture =
+        data->mRecycledExternalTextures.front();
+    // Check if the texture is recyclable.
+    if (texture->mWidth == aWidth && texture->mHeight == aHeight &&
+        texture->mFormat.tag == aFormat.tag && texture->mUsage == aUsage) {
+      data->mRecycledExternalTextures.pop_front();
+      mExternalTextures.emplace(aTextureId, texture);
+      return true;
+    }
+    data->mRecycledExternalTextures.clear();
+  }
+
+  auto externalTexture = CreateExternalTexture(aDeviceId, aTextureId, aWidth,
+                                               aHeight, aFormat, aUsage);
+  if (!externalTexture) {
+    return false;
+  }
+  return true;
+}
+
+std::shared_ptr<ExternalTexture> WebGPUParent::CreateExternalTexture(
+    ffi::WGPUDeviceId aDeviceId, ffi::WGPUTextureId aTextureId, uint32_t aWidth,
+    uint32_t aHeight, const struct ffi::WGPUTextureFormat aFormat,
+    ffi::WGPUTextureUsages aUsage) {
+  MOZ_RELEASE_ASSERT(mExternalTextures.find(aTextureId) ==
+                     mExternalTextures.end());
+
+  UniquePtr<ExternalTexture> texture =
+      ExternalTexture::Create(aWidth, aHeight, aFormat, aUsage);
+  if (!texture) {
+    MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+    return nullptr;
+  }
+
+  std::shared_ptr<ExternalTexture> shared(texture.release());
+  mExternalTextures.emplace(aTextureId, shared);
+
+  return shared;
+}
+
+std::shared_ptr<ExternalTexture> WebGPUParent::GetExternalTexture(
+    ffi::WGPUTextureId aId) {
+  auto it = mExternalTextures.find(aId);
+  if (it == mExternalTextures.end()) {
+    return nullptr;
+  }
+  return it->second;
+}
+
+/* static */
+Maybe<ffi::WGPUFfiLUID> WebGPUParent::GetCompositorDeviceLuid() {
+#if defined(XP_WIN)
+  const RefPtr<ID3D11Device> d3d11Device =
+      gfx::DeviceManagerDx::Get()->GetCompositorDevice();
+  if (!d3d11Device) {
+    gfxCriticalNoteOnce << "CompositorDevice does not exist";
+    return Nothing();
+  }
+
+  RefPtr<IDXGIDevice> dxgiDevice;
+  d3d11Device->QueryInterface((IDXGIDevice**)getter_AddRefs(dxgiDevice));
+
+  RefPtr<IDXGIAdapter> dxgiAdapter;
+  dxgiDevice->GetAdapter(getter_AddRefs(dxgiAdapter));
+
+  DXGI_ADAPTER_DESC desc;
+  if (FAILED(dxgiAdapter->GetDesc(&desc))) {
+    gfxCriticalNoteOnce << "Failed to get DXGI_ADAPTER_DESC";
+    return Nothing();
+  }
+
+  return Some(
+      ffi::WGPUFfiLUID{desc.AdapterLuid.LowPart, desc.AdapterLuid.HighPart});
+#else
+  return Nothing();
+#endif
+}
+
+}  // namespace mozilla::webgpu
diff --git a/dom/webgpu/ipc/WebGPUParent.h b/dom/webgpu/ipc/WebGPUParent.h
new file mode 100644
index 0000000000..6ad539c21e
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUParent.h
@@ -0,0 +1,238 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_PARENT_H_
+#define WEBGPU_PARENT_H_
+
+#include <unordered_map>
+
+#include "mozilla/WeakPtr.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "mozilla/webgpu/PWebGPUParent.h"
+#include "mozilla/webrender/WebRenderAPI.h"
+#include "mozilla/ipc/RawShmem.h"
+#include "WebGPUTypes.h"
+#include "base/timer.h"
+
+namespace mozilla {
+
+namespace layers {
+class RemoteTextureOwnerClient;
+}  // namespace layers
+
+namespace webgpu {
+
+class ErrorBuffer;
+class ExternalTexture;
+class PresentationData;
+
+// Destroy/Drop messages:
+// - Messages with "Destroy" in their name request deallocation of resources
+//   owned by the object and put the object in a destroyed state, without
+//   deleting the object. It is still safe to refer to these objects.
+// - Messages with "Drop" in their name can be thought of as C++ destructors.
+//   They completely delete the object, so future attempts to access these
+//   objects will crash.
+//   The child process should *never* send a Drop message if it still holds
+//   references to the object. An object that has been destroyed still needs
+//   to be dropped when the last reference to it dies on the child process.
+
+class WebGPUParent final : public PWebGPUParent, public SupportsWeakPtr {
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebGPUParent, override)
+
+ public:
+  explicit WebGPUParent();
+
+  ipc::IPCResult RecvInstanceRequestAdapter(
+      const dom::GPURequestAdapterOptions& aOptions,
+      const nsTArray<RawId>& aTargetIds,
+      InstanceRequestAdapterResolver&& resolver);
+  ipc::IPCResult RecvAdapterRequestDevice(
+      RawId aAdapterId, const ipc::ByteBuf& aByteBuf, RawId aDeviceId,
+      AdapterRequestDeviceResolver&& resolver);
+  ipc::IPCResult RecvAdapterDrop(RawId aAdapterId);
+  ipc::IPCResult RecvDeviceDestroy(RawId aDeviceId);
+  ipc::IPCResult RecvDeviceDrop(RawId aDeviceId);
+  ipc::IPCResult RecvDeviceCreateBuffer(RawId aDeviceId, RawId aBufferId,
+                                        dom::GPUBufferDescriptor&& aDesc,
+                                        ipc::UnsafeSharedMemoryHandle&& aShmem);
+  ipc::IPCResult RecvBufferMap(RawId aDeviceId, RawId aBufferId, uint32_t aMode,
+                               uint64_t aOffset, uint64_t size,
+                               BufferMapResolver&& aResolver);
+  ipc::IPCResult RecvBufferUnmap(RawId aDeviceId, RawId aBufferId, bool aFlush);
+  ipc::IPCResult RecvBufferDestroy(RawId aBufferId);
+  ipc::IPCResult RecvBufferDrop(RawId aBufferId);
+  ipc::IPCResult RecvTextureDestroy(RawId aTextureId, RawId aDeviceId);
+  ipc::IPCResult RecvTextureDrop(RawId aTextureId);
+  ipc::IPCResult RecvTextureViewDrop(RawId aTextureViewId);
+  ipc::IPCResult RecvSamplerDrop(RawId aSamplerId);
+  ipc::IPCResult RecvCommandEncoderFinish(
+      RawId aEncoderId, RawId aDeviceId,
+      const dom::GPUCommandBufferDescriptor& aDesc);
+  ipc::IPCResult RecvCommandEncoderDrop(RawId aEncoderId);
+  ipc::IPCResult RecvCommandBufferDrop(RawId aCommandBufferId);
+  ipc::IPCResult RecvRenderBundleDrop(RawId aBundleId);
+  ipc::IPCResult RecvQueueSubmit(RawId aQueueId, RawId aDeviceId,
+                                 const nsTArray<RawId>& aCommandBuffers,
+                                 const nsTArray<RawId>& aTextureIds);
+  ipc::IPCResult RecvQueueOnSubmittedWorkDone(
+      RawId aQueueId, std::function&& aResolver);
+  ipc::IPCResult RecvQueueWriteAction(RawId aQueueId, RawId aDeviceId,
+                                      const ipc::ByteBuf& aByteBuf,
+                                      ipc::UnsafeSharedMemoryHandle&& aShmem);
+  ipc::IPCResult RecvBindGroupLayoutDrop(RawId aBindGroupLayoutId);
+  ipc::IPCResult RecvPipelineLayoutDrop(RawId aPipelineLayoutId);
+  ipc::IPCResult RecvBindGroupDrop(RawId aBindGroupId);
+  ipc::IPCResult RecvShaderModuleDrop(RawId aModuleId);
+  ipc::IPCResult RecvComputePipelineDrop(RawId aPipelineId);
+  ipc::IPCResult RecvRenderPipelineDrop(RawId aPipelineId);
+  ipc::IPCResult RecvImplicitLayoutDrop(RawId aImplicitPlId,
+                                        const nsTArray<RawId>& aImplicitBglIds);
+  ipc::IPCResult RecvDeviceCreateSwapChain(
+      RawId aDeviceId, RawId aQueueId, const layers::RGBDescriptor& aDesc,
+      const nsTArray<RawId>& aBufferIds,
+      const layers::RemoteTextureOwnerId& aOwnerId,
+      bool aUseExternalTextureInSwapChain);
+  ipc::IPCResult RecvDeviceCreateShaderModule(
+      RawId aDeviceId, RawId aModuleId, const nsString& aLabel,
+      const nsCString& aCode, DeviceCreateShaderModuleResolver&& aOutMessage);
+
+  ipc::IPCResult RecvSwapChainPresent(
+      RawId aTextureId, RawId aCommandEncoderId,
+      const layers::RemoteTextureId& aRemoteTextureId,
+      const layers::RemoteTextureOwnerId& aOwnerId);
+  ipc::IPCResult RecvSwapChainDrop(const layers::RemoteTextureOwnerId& aOwnerId,
+                                   layers::RemoteTextureTxnType aTxnType,
+                                   layers::RemoteTextureTxnId aTxnId);
+
+  ipc::IPCResult RecvDeviceAction(RawId aDeviceId,
+                                  const ipc::ByteBuf& aByteBuf);
+  ipc::IPCResult RecvDeviceActionWithAck(
+      RawId aDeviceId, const ipc::ByteBuf& aByteBuf,
+      DeviceActionWithAckResolver&& aResolver);
+  ipc::IPCResult RecvTextureAction(RawId aTextureId, RawId aDevice,
+                                   const ipc::ByteBuf& aByteBuf);
+  ipc::IPCResult RecvCommandEncoderAction(RawId aEncoderId, RawId aDeviceId,
+                                          const ipc::ByteBuf& aByteBuf);
+  ipc::IPCResult RecvBumpImplicitBindGroupLayout(RawId aPipelineId,
+                                                 bool aIsCompute,
+                                                 uint32_t aIndex,
+                                                 RawId aAssignId);
+
+  ipc::IPCResult RecvDevicePushErrorScope(RawId aDeviceId, dom::GPUErrorFilter);
+  ipc::IPCResult RecvDevicePopErrorScope(
+      RawId aDeviceId, DevicePopErrorScopeResolver&& aResolver);
+  ipc::IPCResult RecvGenerateError(Maybe<RawId> aDeviceId, dom::GPUErrorFilter,
+                                   const nsCString& message);
+
+  ipc::IPCResult GetFrontBufferSnapshot(
+      IProtocol* aProtocol, const layers::RemoteTextureOwnerId& aOwnerId,
+      Maybe<Shmem>& aShmem, gfx::IntSize& aSize);
+
+  void ActorDestroy(ActorDestroyReason aWhy) override;
+
+  struct BufferMapData {
+    ipc::WritableSharedMemoryMapping mShmem;
+    // True if buffer's usage has MAP_READ or MAP_WRITE set.
+    bool mHasMapFlags;
+    uint64_t mMappedOffset;
+    uint64_t mMappedSize;
+    RawId mDeviceId;
+  };
+
+  BufferMapData* GetBufferMapData(RawId aBufferId);
+
+  bool UseExternalTextureForSwapChain(ffi::WGPUSwapChainId aSwapChainId);
+
+  bool EnsureExternalTextureForSwapChain(ffi::WGPUSwapChainId aSwapChainId,
+                                         ffi::WGPUDeviceId aDeviceId,
+                                         ffi::WGPUTextureId aTextureId,
+                                         uint32_t aWidth, uint32_t aHeight,
+                                         struct ffi::WGPUTextureFormat aFormat,
+                                         ffi::WGPUTextureUsages aUsage);
+
+  std::shared_ptr<ExternalTexture> CreateExternalTexture(
+      ffi::WGPUDeviceId aDeviceId, ffi::WGPUTextureId aTextureId,
+      uint32_t aWidth, uint32_t aHeight,
+      const struct ffi::WGPUTextureFormat aFormat,
+      ffi::WGPUTextureUsages aUsage);
+
+  std::shared_ptr<ExternalTexture> GetExternalTexture(ffi::WGPUTextureId aId);
+
+  void PostExternalTexture(
+      const std::shared_ptr<ExternalTexture>&& aExternalTexture,
+      const layers::RemoteTextureId aRemoteTextureId,
+      const layers::RemoteTextureOwnerId aOwnerId);
+
+  bool ForwardError(const RawId aDeviceId, ErrorBuffer& aError) {
+    return ForwardError(Some(aDeviceId), aError);
+  }
+
+ private:
+  static void MapCallback(ffi::WGPUBufferMapAsyncStatus aStatus,
+                          uint8_t* aUserData);
+  static void DeviceLostCallback(uint8_t* aUserData, uint8_t aReason,
+                                 const char* aMessage);
+  void DeallocBufferShmem(RawId aBufferId);
+
+  void RemoveExternalTexture(RawId aTextureId);
+
+  virtual ~WebGPUParent();
+  void MaintainDevices();
+  void LoseDevice(const RawId aDeviceId, Maybe<uint8_t> aReason,
+                  const nsACString& aMessage);
+
+  bool ForwardError(Maybe<RawId> aDeviceId, ErrorBuffer& aError);
+
+  void ReportError(Maybe<RawId> aDeviceId, GPUErrorFilter,
+                   const nsCString& message);
+
+  static Maybe<ffi::WGPUFfiLUID> GetCompositorDeviceLuid();
+
+  UniquePtr<ffi::WGPUGlobal> mContext;
+  base::RepeatingTimer<WebGPUParent> mTimer;
+
+  /// A map from wgpu buffer ids to data about their shared memory segments.
+  /// Includes entries about mappedAtCreation, MAP_READ and MAP_WRITE buffers,
+  /// regardless of their state.
+  std::unordered_map<RawId, BufferMapData> mSharedMemoryMap;
+  /// Associated presentation data for each swapchain.
+  std::unordered_map<layers::RemoteTextureOwnerId, RefPtr<PresentationData>,
+                     layers::RemoteTextureOwnerId::HashFn>
+      mPresentationDataMap;
+
+  RefPtr<layers::RemoteTextureOwnerClient> mRemoteTextureOwner;
+
+  /// Associated stack of error scopes for each device.
+  std::unordered_map<RawId, std::vector<ErrorScope>>
+      mErrorScopeStackByDevice;
+
+  std::unordered_map<ffi::WGPUTextureId, std::shared_ptr<ExternalTexture>>
+      mExternalTextures;
+
+  // Store a set of DeviceIds that have been SendDeviceLost.
+  // We use this to limit each Device to one DeviceLost message.
+  nsTHashSet<RawId> mLostDeviceIds;
+
+  // Shared handle of wgpu device's fence.
+  RefPtr mFenceHandle;
+
+  // Store DeviceLostRequest structs for each device as unique_ptrs mapped
+  // to their device ids. We keep these unique_ptrs alive as long as the
+  // device is alive.
+  struct DeviceLostRequest {
+    WeakPtr<WebGPUParent> mParent;
+    RawId mDeviceId;
+  };
+  std::unordered_map<RawId, UniquePtr<DeviceLostRequest>> mDeviceLostRequests;
+};
+
+}  // namespace webgpu
+}  // namespace mozilla
+
+#endif  // WEBGPU_PARENT_H_
diff --git a/dom/webgpu/ipc/WebGPUSerialize.h b/dom/webgpu/ipc/WebGPUSerialize.h
new file mode 100644
index 0000000000..8d78d784cb
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUSerialize.h
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_SERIALIZE_H_
+#define WEBGPU_SERIALIZE_H_
+
+#include "WebGPUTypes.h"
+#include "ipc/EnumSerializer.h"
+#include "ipc/IPCMessageUtils.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace IPC {
+
+#define DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, guard)                \
+  template <>                                                             \
+  struct ParamTraits<something>                                           \
+      : public ContiguousEnumSerializer<something, something(0), guard> {}
+
+#define DEFINE_IPC_SERIALIZER_DOM_ENUM(something) \
+  DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something::EndGuard_)
+#define DEFINE_IPC_SERIALIZER_FFI_ENUM(something) \
+  DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something##_Sentinel)
+
+// -
+
+DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::dom::GPUErrorFilter);
+DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::dom::GPUPowerPreference);
+
+DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUHostMap);
+
+DEFINE_IPC_SERIALIZER_WITHOUT_FIELDS(mozilla::dom::GPUCommandBufferDescriptor);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPURequestAdapterOptions,
+                                  mPowerPreference, mForceFallbackAdapter);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUBufferDescriptor, mSize,
+                                  mUsage, mMappedAtCreation);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::PopErrorScopeResult,
+                                  resultType, message);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::WebGPUCompilationMessage,
+                                  message, lineNum, linePos);
+
+#undef DEFINE_IPC_SERIALIZER_FFI_ENUM
+#undef DEFINE_IPC_SERIALIZER_DOM_ENUM
+#undef DEFINE_IPC_SERIALIZER_ENUM_GUARD
+
+// -
+
+template <>
+struct ParamTraits<mozilla::webgpu::PopErrorScopeResultType>
+    : public ContiguousEnumSerializerInclusive<
+          mozilla::webgpu::PopErrorScopeResultType,
+          mozilla::webgpu::PopErrorScopeResultType{0},
+          mozilla::webgpu::PopErrorScopeResultType::_LAST> {};
+
+}  // namespace IPC
+
+#endif  // WEBGPU_SERIALIZE_H_
diff --git a/dom/webgpu/ipc/WebGPUTypes.h b/dom/webgpu/ipc/WebGPUTypes.h
new file mode 100644
index 0000000000..ce6685ded5
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUTypes.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_TYPES_H_
+#define WEBGPU_TYPES_H_
+
+#include <cstdint>
+#include "mozilla/Maybe.h"
+#include "nsString.h"
+#include "mozilla/dom/BindingDeclarations.h"
+
+namespace mozilla::dom {
+enum class GPUErrorFilter : uint8_t;
+}  // namespace mozilla::dom
+
+namespace mozilla::webgpu {
+
+using RawId = uint64_t;
+using BufferAddress = uint64_t;
+
+struct ErrorScope {
+  dom::GPUErrorFilter filter;
+  Maybe<nsCString> firstMessage;
+};
+
+enum class PopErrorScopeResultType : uint8_t {
+  NoError,
+  ThrowOperationError,
+  ValidationError,
+  OutOfMemory,
+  InternalError,
+  DeviceLost,
+  _LAST = DeviceLost,
+};
+
+struct PopErrorScopeResult {
+  PopErrorScopeResultType resultType;
+  nsCString message;
+};
+
+enum class WebGPUCompilationMessageType { Error, Warning, Info };
+
+// TODO: Better name? CompilationMessage is already taken by the dom object.
+/// The serializable counterpart of the dom object CompilationMessage.
+struct WebGPUCompilationMessage {
+  nsString message;
+  uint64_t lineNum = 0;
+  uint64_t linePos = 0;
+  // In utf16 code units.
+  uint64_t offset = 0;
+  // In utf16 code units.
+  uint64_t length = 0;
+  WebGPUCompilationMessageType messageType =
+      WebGPUCompilationMessageType::Error;
+};
+
+/// A helper to reduce the boilerplate of turning the many Optional values
+/// we get from the dom into the nullable nsACString* we pass to the wgpu ffi.
+class StringHelper {
+ public:
+  explicit StringHelper(const nsString& aWide) {
+    if (!aWide.IsEmpty()) {
+      mNarrow = Some(NS_ConvertUTF16toUTF8(aWide));
+    }
+  }
+
+  const nsACString* Get() const {
+    if (mNarrow.isSome()) {
+      return mNarrow.ptr();
+    }
+    return nullptr;
+  }
+
+ private:
+  Maybe<NS_ConvertUTF16toUTF8> mNarrow;
+};
+
+}  // namespace mozilla::webgpu
+
+#endif  // WEBGPU_TYPES_H_
-- 
cgit v1.2.3
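
For readers following the error-scope plumbing in WebGPUParent.cpp and the PopErrorScopeResult types above, the following is a minimal, standalone sketch of the per-device push/pop bookkeeping. It is not Gecko code: it uses std::vector and std::optional in place of the Mozilla containers, the names (Scope, ScopeStack, report) are illustrative only, and the "route to the innermost scope with a matching filter" rule is paraphrased from the WebGPU error-scope model rather than copied from this patch.

// Standalone illustration (NOT Gecko code) of a per-device error-scope stack:
// push records a filter, errors are captured by the innermost matching scope
// (keeping only the first message), and pop reports what was captured.
#include <cstddef>
#include <cstdio>
#include <optional>
#include <string>
#include <vector>

enum class Filter { Validation, OutOfMemory, Internal };

struct Scope {
  Filter filter;
  std::optional<std::string> firstMessage;  // only the first error is kept
};

struct ScopeStack {
  std::vector<Scope> scopes;
  // Mirrors MAX_ERROR_SCOPE_STACK_SIZE in RecvDevicePushErrorScope.
  static constexpr size_t kMaxDepth = 1'000'000;

  bool push(Filter f) {
    if (scopes.size() >= kMaxDepth) return false;  // reported as OOM upstream
    scopes.push_back(Scope{f, std::nullopt});
    return true;
  }

  // Record an error in the innermost scope whose filter matches; returns
  // false if nothing captured it (it would then surface as an uncaptured
  // error on the device).
  bool report(Filter f, const std::string& message) {
    for (auto it = scopes.rbegin(); it != scopes.rend(); ++it) {
      if (it->filter == f) {
        if (!it->firstMessage) it->firstMessage = message;
        return true;
      }
    }
    return false;
  }

  // Pop the innermost scope and hand back its first captured message, if any.
  // An empty stack corresponds to the ThrowOperationError result above.
  std::optional<std::string> pop() {
    if (scopes.empty()) return std::nullopt;
    auto msg = scopes.back().firstMessage;
    scopes.pop_back();
    return msg;
  }
};

int main() {
  ScopeStack stack;
  stack.push(Filter::Validation);
  stack.report(Filter::Validation, "texture size exceeds limits");
  stack.report(Filter::Validation, "second error is ignored");
  if (auto msg = stack.pop()) {
    std::printf("validation error: %s\n", msg->c_str());
  }
}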
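The Destroy/Drop distinction documented at the top of WebGPUParent.h can also be expressed as a tiny state model. The sketch below is illustrative only; the Registry and GpuObject types are hypothetical and not how WebGPUParent actually stores its objects. It merely shows why a destroyed object may still be referenced while a dropped one must not be.

// Illustrative model (NOT Gecko code) of the Destroy vs. Drop lifecycle:
// Destroy releases the expensive resources but keeps the entry alive,
// Drop removes the entry entirely.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

using RawId = uint64_t;

struct GpuObject {
  std::vector<std::byte> resources;  // stand-in for driver allocations
  bool destroyed = false;
};

class Registry {
 public:
  void Create(RawId id, size_t bytes) {
    mObjects.emplace(id, GpuObject{std::vector<std::byte>(bytes), false});
  }

  // "Destroy": free the resources, but the id stays valid, so later messages
  // that mention it are still well-defined (they no-op or raise an error).
  void Destroy(RawId id) {
    auto it = mObjects.find(id);
    if (it == mObjects.end()) return;
    it->second.resources.clear();
    it->second.destroyed = true;
  }

  // "Drop": the C++-destructor analogue; afterwards the id is gone and the
  // child must never reference it again.
  void Drop(RawId id) { mObjects.erase(id); }

  bool Exists(RawId id) const { return mObjects.count(id) != 0; }

 private:
  std::unordered_map<RawId, GpuObject> mObjects;
};

int main() {
  Registry reg;
  reg.Create(1, 1024);
  reg.Destroy(1);         // resources released...
  assert(reg.Exists(1));  // ...but the object can still be referenced
  reg.Drop(1);            // now it is gone for good
  assert(!reg.Exists(1));
}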