author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit    43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree      620249daf56c0258faa40cbdcf9cfba06de2a846 /dom/webgpu/ipc
parent    Initial commit. (diff)
Adding upstream version 110.0.1. (tags: upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'dom/webgpu/ipc')
-rw-r--r--  dom/webgpu/ipc/PWebGPU.ipdl        |   93
-rw-r--r--  dom/webgpu/ipc/PWebGPUTypes.ipdlh  |   26
-rw-r--r--  dom/webgpu/ipc/WebGPUChild.cpp     | 1080
-rw-r--r--  dom/webgpu/ipc/WebGPUChild.h       |  146
-rw-r--r--  dom/webgpu/ipc/WebGPUParent.cpp    | 1116
-rw-r--r--  dom/webgpu/ipc/WebGPUParent.h      |  156
-rw-r--r--  dom/webgpu/ipc/WebGPUSerialize.h   |   50
-rw-r--r--  dom/webgpu/ipc/WebGPUTypes.h       |   69
8 files changed, 2736 insertions(+), 0 deletions(-)
diff --git a/dom/webgpu/ipc/PWebGPU.ipdl b/dom/webgpu/ipc/PWebGPU.ipdl
new file mode 100644
index 0000000000..daa3873550
--- /dev/null
+++ b/dom/webgpu/ipc/PWebGPU.ipdl
@@ -0,0 +1,93 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: sw=2 ts=8 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+using layers::RGBDescriptor from "mozilla/layers/LayersSurfaces.h";
+using layers::RemoteTextureId from "mozilla/layers/LayersTypes.h";
+using layers::RemoteTextureOwnerId from "mozilla/layers/LayersTypes.h";
+using RawId from "mozilla/webgpu/WebGPUTypes.h";
+using dom::GPURequestAdapterOptions from "mozilla/dom/WebGPUBinding.h";
+using dom::GPUCommandBufferDescriptor from "mozilla/dom/WebGPUBinding.h";
+using dom::GPUBufferDescriptor from "mozilla/dom/WebGPUBinding.h";
+using MaybeScopedError from "mozilla/webgpu/WebGPUTypes.h";
+using WebGPUCompilationMessage from "mozilla/webgpu/WebGPUTypes.h";
+[MoveOnly] using class mozilla::ipc::UnsafeSharedMemoryHandle from "mozilla/ipc/RawShmem.h";
+
+include "mozilla/ipc/ByteBufUtils.h";
+include "mozilla/layers/LayersMessageUtils.h";
+include "mozilla/webgpu/WebGPUSerialize.h";
+include "mozilla/layers/WebRenderMessageUtils.h";
+include protocol PCanvasManager;
+include PWebGPUTypes;
+
+namespace mozilla {
+namespace webgpu {
+
+/**
+ * Represents the connection between a WebGPUChild actor that issues WebGPU
+ * commands from the content process, and a WebGPUParent in the compositor
+ * process that runs the commands.
+ */
+async protocol PWebGPU
+{
+ manager PCanvasManager;
+
+parent:
+ async DeviceAction(RawId selfId, ByteBuf buf);
+ async DeviceActionWithAck(RawId selfId, ByteBuf buf) returns (bool dummy);
+ async TextureAction(RawId selfId, RawId aDeviceId, ByteBuf buf);
+ async CommandEncoderAction(RawId selfId, RawId aDeviceId, ByteBuf buf);
+ async BumpImplicitBindGroupLayout(RawId pipelineId, bool isCompute, uint32_t index, RawId assignId);
+
+ async CreateBuffer(RawId deviceId, RawId bufferId, GPUBufferDescriptor desc, UnsafeSharedMemoryHandle shm);
+
+ async InstanceRequestAdapter(GPURequestAdapterOptions options, RawId[] ids) returns (ByteBuf byteBuf);
+ async AdapterRequestDevice(RawId selfId, ByteBuf buf, RawId newId) returns (bool success);
+ async AdapterDestroy(RawId selfId);
+ // TODO: We want to return an array of compilation messages.
+ async DeviceCreateShaderModule(RawId selfId, RawId bufferId, nsString label, nsCString code) returns (WebGPUCompilationMessage[] messages);
+ async BufferMap(RawId selfId, uint32_t aMode, uint64_t offset, uint64_t size) returns (BufferMapResult result);
+ async BufferUnmap(RawId deviceId, RawId bufferId, bool flush);
+ async BufferDestroy(RawId selfId);
+ async BufferDrop(RawId selfId);
+ async TextureDestroy(RawId selfId);
+ async TextureViewDestroy(RawId selfId);
+ async SamplerDestroy(RawId selfId);
+ async DeviceDestroy(RawId selfId);
+
+ async CommandEncoderFinish(RawId selfId, RawId deviceId, GPUCommandBufferDescriptor desc);
+ async CommandEncoderDestroy(RawId selfId);
+ async CommandBufferDestroy(RawId selfId);
+ async RenderBundleDestroy(RawId selfId);
+ async QueueSubmit(RawId selfId, RawId aDeviceId, RawId[] commandBuffers);
+ async QueueWriteAction(RawId selfId, RawId aDeviceId, ByteBuf buf, UnsafeSharedMemoryHandle shmem);
+
+ async BindGroupLayoutDestroy(RawId selfId);
+ async PipelineLayoutDestroy(RawId selfId);
+ async BindGroupDestroy(RawId selfId);
+ async ShaderModuleDestroy(RawId selfId);
+ async ComputePipelineDestroy(RawId selfId);
+ async RenderPipelineDestroy(RawId selfId);
+ async ImplicitLayoutDestroy(RawId implicitPlId, RawId[] implicitBglIds);
+ async DeviceCreateSwapChain(RawId selfId, RawId queueId, RGBDescriptor desc, RawId[] bufferIds, RemoteTextureOwnerId ownerId);
+ async SwapChainPresent(RawId textureId, RawId commandEncoderId, RemoteTextureId remoteTextureId, RemoteTextureOwnerId remoteTextureOwnerId);
+ async SwapChainDestroy(RemoteTextureOwnerId ownerId);
+
+ async DevicePushErrorScope(RawId selfId);
+ async DevicePopErrorScope(RawId selfId) returns (MaybeScopedError maybeError);
+
+ // Generate an error on the Device timeline for `deviceId`.
+ // The `message` parameter is interpreted as UTF-8.
+ async GenerateError(RawId deviceId, nsCString message);
+
+child:
+ async DeviceUncapturedError(RawId aDeviceId, nsCString message);
+ async DropAction(ByteBuf buf);
+ async __delete__();
+};
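+
+// Illustrative note (not part of the generated protocol): on the C++ side
+// these messages surface as generated Send*/Recv* methods. A child-side
+// call is sketched as:
+//
+//   if (!SendDeviceAction(deviceId, std::move(byteBuf))) {
+//     MOZ_CRASH("IPC failure");
+//   }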
+
+} // webgpu
+} // mozilla
diff --git a/dom/webgpu/ipc/PWebGPUTypes.ipdlh b/dom/webgpu/ipc/PWebGPUTypes.ipdlh
new file mode 100644
index 0000000000..98f062856c
--- /dev/null
+++ b/dom/webgpu/ipc/PWebGPUTypes.ipdlh
@@ -0,0 +1,26 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+using struct mozilla::null_t from "mozilla/ipc/IPCCore.h";
+
+namespace mozilla {
+namespace webgpu {
+
+struct BufferMapSuccess {
+ uint64_t offset;
+ uint64_t size;
+ bool writable;
+};
+
+struct BufferMapError {
+ nsCString message;
+};
+
+union BufferMapResult {
+ BufferMapSuccess;
+ BufferMapError;
+};
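+
+// Sketch (illustrative only) of how the receiver inspects this union; the
+// type()/get_*() accessors follow the usual IPDL codegen pattern:
+//
+//   if (aResult.type() == BufferMapResult::TBufferMapSuccess) {
+//     const auto& ok = aResult.get_BufferMapSuccess();
+//     // use ok.offset(), ok.size(), ok.writable()
+//   } else {
+//     // surface aResult.get_BufferMapError().message() to script
+//   }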
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ipc/WebGPUChild.cpp b/dom/webgpu/ipc/WebGPUChild.cpp
new file mode 100644
index 0000000000..5bc08c4386
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUChild.cpp
@@ -0,0 +1,1080 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebGPUChild.h"
+#include "js/Warnings.h" // JS::WarnUTF8
+#include "mozilla/EnumTypeTraits.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/dom/ScriptSettings.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/dom/GPUUncapturedErrorEvent.h"
+#include "mozilla/webgpu/ValidationError.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "Adapter.h"
+#include "DeviceLostInfo.h"
+#include "Sampler.h"
+#include "CompilationInfo.h"
+#include "mozilla/ipc/RawShmem.h"
+
+namespace mozilla::webgpu {
+
+NS_IMPL_CYCLE_COLLECTION(WebGPUChild)
+
+void WebGPUChild::JsWarning(nsIGlobalObject* aGlobal,
+ const nsACString& aMessage) {
+ const auto& flatString = PromiseFlatCString(aMessage);
+ if (aGlobal) {
+ dom::AutoJSAPI api;
+ if (api.Init(aGlobal)) {
+ JS::WarnUTF8(api.cx(), "%s", flatString.get());
+ }
+ } else {
+ printf_stderr("Validation error without device target: %s\n",
+ flatString.get());
+ }
+}
+
+static ffi::WGPUCompareFunction ConvertCompareFunction(
+ const dom::GPUCompareFunction& aCompare) {
+ // Value of 0 = Undefined is reserved on the C side for "null" semantics.
+ return ffi::WGPUCompareFunction(UnderlyingValue(aCompare) + 1);
+}
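+
+// For example, dom::GPUCompareFunction::Never (underlying value 0) maps to
+// ffi::WGPUCompareFunction value 1, keeping 0 free so that a
+// zero-initialized field reads as "no compare function set".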
+
+static ffi::WGPUTextureFormat ConvertTextureFormat(
+ const dom::GPUTextureFormat& aFormat) {
+ ffi::WGPUTextureFormat result = {ffi::WGPUTextureFormat_Sentinel};
+ switch (aFormat) {
+ case dom::GPUTextureFormat::R8unorm:
+ result.tag = ffi::WGPUTextureFormat_R8Unorm;
+ break;
+ case dom::GPUTextureFormat::R8snorm:
+ result.tag = ffi::WGPUTextureFormat_R8Snorm;
+ break;
+ case dom::GPUTextureFormat::R8uint:
+ result.tag = ffi::WGPUTextureFormat_R8Uint;
+ break;
+ case dom::GPUTextureFormat::R8sint:
+ result.tag = ffi::WGPUTextureFormat_R8Sint;
+ break;
+ case dom::GPUTextureFormat::R16uint:
+ result.tag = ffi::WGPUTextureFormat_R16Uint;
+ break;
+ case dom::GPUTextureFormat::R16sint:
+ result.tag = ffi::WGPUTextureFormat_R16Sint;
+ break;
+ case dom::GPUTextureFormat::R16float:
+ result.tag = ffi::WGPUTextureFormat_R16Float;
+ break;
+ case dom::GPUTextureFormat::Rg8unorm:
+ result.tag = ffi::WGPUTextureFormat_Rg8Unorm;
+ break;
+ case dom::GPUTextureFormat::Rg8snorm:
+ result.tag = ffi::WGPUTextureFormat_Rg8Snorm;
+ break;
+ case dom::GPUTextureFormat::Rg8uint:
+ result.tag = ffi::WGPUTextureFormat_Rg8Uint;
+ break;
+ case dom::GPUTextureFormat::Rg8sint:
+ result.tag = ffi::WGPUTextureFormat_Rg8Sint;
+ break;
+ case dom::GPUTextureFormat::R32uint:
+ result.tag = ffi::WGPUTextureFormat_R32Uint;
+ break;
+ case dom::GPUTextureFormat::R32sint:
+ result.tag = ffi::WGPUTextureFormat_R32Sint;
+ break;
+ case dom::GPUTextureFormat::R32float:
+ result.tag = ffi::WGPUTextureFormat_R32Float;
+ break;
+ case dom::GPUTextureFormat::Rg16uint:
+ result.tag = ffi::WGPUTextureFormat_Rg16Uint;
+ break;
+ case dom::GPUTextureFormat::Rg16sint:
+ result.tag = ffi::WGPUTextureFormat_Rg16Sint;
+ break;
+ case dom::GPUTextureFormat::Rg16float:
+ result.tag = ffi::WGPUTextureFormat_Rg16Float;
+ break;
+ case dom::GPUTextureFormat::Rgba8unorm:
+ result.tag = ffi::WGPUTextureFormat_Rgba8Unorm;
+ break;
+ case dom::GPUTextureFormat::Rgba8unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Rgba8UnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Rgba8snorm:
+ result.tag = ffi::WGPUTextureFormat_Rgba8Snorm;
+ break;
+ case dom::GPUTextureFormat::Rgba8uint:
+ result.tag = ffi::WGPUTextureFormat_Rgba8Uint;
+ break;
+ case dom::GPUTextureFormat::Rgba8sint:
+ result.tag = ffi::WGPUTextureFormat_Rgba8Sint;
+ break;
+ case dom::GPUTextureFormat::Bgra8unorm:
+ result.tag = ffi::WGPUTextureFormat_Bgra8Unorm;
+ break;
+ case dom::GPUTextureFormat::Bgra8unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Bgra8UnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Rgb10a2unorm:
+ result.tag = ffi::WGPUTextureFormat_Rgb10a2Unorm;
+ break;
+ case dom::GPUTextureFormat::Rg11b10float:
+ result.tag = ffi::WGPUTextureFormat_Rg11b10Float;
+ break;
+ case dom::GPUTextureFormat::Rg32uint:
+ result.tag = ffi::WGPUTextureFormat_Rg32Uint;
+ break;
+ case dom::GPUTextureFormat::Rg32sint:
+ result.tag = ffi::WGPUTextureFormat_Rg32Sint;
+ break;
+ case dom::GPUTextureFormat::Rg32float:
+ result.tag = ffi::WGPUTextureFormat_Rg32Float;
+ break;
+ case dom::GPUTextureFormat::Rgba16uint:
+ result.tag = ffi::WGPUTextureFormat_Rgba16Uint;
+ break;
+ case dom::GPUTextureFormat::Rgba16sint:
+ result.tag = ffi::WGPUTextureFormat_Rgba16Sint;
+ break;
+ case dom::GPUTextureFormat::Rgba16float:
+ result.tag = ffi::WGPUTextureFormat_Rgba16Float;
+ break;
+ case dom::GPUTextureFormat::Rgba32uint:
+ result.tag = ffi::WGPUTextureFormat_Rgba32Uint;
+ break;
+ case dom::GPUTextureFormat::Rgba32sint:
+ result.tag = ffi::WGPUTextureFormat_Rgba32Sint;
+ break;
+ case dom::GPUTextureFormat::Rgba32float:
+ result.tag = ffi::WGPUTextureFormat_Rgba32Float;
+ break;
+ case dom::GPUTextureFormat::Depth32float:
+ result.tag = ffi::WGPUTextureFormat_Depth32Float;
+ break;
+ case dom::GPUTextureFormat::Bc1_rgba_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc1RgbaUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc1_rgba_unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Bc1RgbaUnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Bc4_r_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc4RUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc4_r_snorm:
+ result.tag = ffi::WGPUTextureFormat_Bc4RSnorm;
+ break;
+ case dom::GPUTextureFormat::Bc2_rgba_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc2RgbaUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc2_rgba_unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Bc2RgbaUnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Bc3_rgba_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc3RgbaUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc3_rgba_unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Bc3RgbaUnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Bc5_rg_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc5RgUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc5_rg_snorm:
+ result.tag = ffi::WGPUTextureFormat_Bc5RgSnorm;
+ break;
+ case dom::GPUTextureFormat::Bc6h_rgb_ufloat:
+ result.tag = ffi::WGPUTextureFormat_Bc6hRgbUfloat;
+ break;
+ case dom::GPUTextureFormat::Bc6h_rgb_float:
+ result.tag = ffi::WGPUTextureFormat_Bc6hRgbSfloat;
+ break;
+ case dom::GPUTextureFormat::Bc7_rgba_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc7RgbaUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc7_rgba_unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Bc7RgbaUnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Depth24plus:
+ result.tag = ffi::WGPUTextureFormat_Depth24Plus;
+ break;
+ case dom::GPUTextureFormat::Depth24plus_stencil8:
+ result.tag = ffi::WGPUTextureFormat_Depth24PlusStencil8;
+ break;
+ case dom::GPUTextureFormat::EndGuard_:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+
+ // Clang will check for us that the switch above is exhaustive,
+ // but not if we add a 'default' case. So, check this here.
+ MOZ_ASSERT(result.tag != ffi::WGPUTextureFormat_Sentinel,
+ "unexpected texture format enum");
+
+ return result;
+}
+
+void WebGPUChild::ConvertTextureFormatRef(const dom::GPUTextureFormat& aInput,
+ ffi::WGPUTextureFormat& aOutput) {
+ aOutput = ConvertTextureFormat(aInput);
+}
+
+static UniquePtr<ffi::WGPUClient> initialize() {
+ ffi::WGPUInfrastructure infra = ffi::wgpu_client_new();
+ return UniquePtr<ffi::WGPUClient>{infra.client};
+}
+
+WebGPUChild::WebGPUChild() : mClient(initialize()) {}
+
+WebGPUChild::~WebGPUChild() = default;
+
+RefPtr<AdapterPromise> WebGPUChild::InstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions) {
+ const int max_ids = 10;
+ RawId ids[max_ids] = {0};
+ unsigned long count =
+ ffi::wgpu_client_make_adapter_ids(mClient.get(), ids, max_ids);
+
+ nsTArray<RawId> sharedIds(count);
+ for (unsigned long i = 0; i != count; ++i) {
+ sharedIds.AppendElement(ids[i]);
+ }
+
+ return SendInstanceRequestAdapter(aOptions, sharedIds)
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [](ipc::ByteBuf&& aInfoBuf) {
+ // Ideally, we'd just send an empty ByteBuf, but the IPC code
+ // complains if the capacity is zero...
+ // So for the case where an adapter wasn't found, we just
+ // transfer a single 0u64 in this buffer.
+ return aInfoBuf.mLen > sizeof(uint64_t)
+ ? AdapterPromise::CreateAndResolve(std::move(aInfoBuf),
+ __func__)
+ : AdapterPromise::CreateAndReject(Nothing(), __func__);
+ },
+ [](const ipc::ResponseRejectReason& aReason) {
+ return AdapterPromise::CreateAndReject(Some(aReason), __func__);
+ });
+}
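+
+// A caller typically consumes the promise along these lines (sketch; the
+// resolved ByteBuf holds the serialized adapter info packed by the parent):
+//
+//   InstanceRequestAdapter(options)->Then(
+//       GetCurrentSerialEventTarget(), __func__,
+//       [](ipc::ByteBuf&& aInfo) { /* deserialize and build the Adapter */ },
+//       [](const Maybe<ipc::ResponseRejectReason>&) { /* no adapter found */ });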
+
+Maybe<DeviceRequest> WebGPUChild::AdapterRequestDevice(
+ RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc,
+ ffi::WGPULimits* aLimits) {
+ ffi::WGPUDeviceDescriptor desc = {};
+ ffi::wgpu_client_fill_default_limits(&desc.limits);
+
+ // webgpu::StringHelper label(aDesc.mLabel);
+ // desc.label = label.Get();
+
+ const auto featureBits = Adapter::MakeFeatureBits(aDesc.mRequiredFeatures);
+ if (!featureBits) {
+ return Nothing();
+ }
+ desc.features = *featureBits;
+
+ if (aDesc.mRequiredLimits.WasPassed()) {
+ for (const auto& entry : aDesc.mRequiredLimits.Value().Entries()) {
+ const uint32_t valueU32 =
+ entry.mValue < std::numeric_limits<uint32_t>::max()
+ ? entry.mValue
+ : std::numeric_limits<uint32_t>::max();
+ if (entry.mKey == u"maxTextureDimension1D"_ns) {
+ desc.limits.max_texture_dimension_1d = valueU32;
+ } else if (entry.mKey == u"maxTextureDimension2D"_ns) {
+ desc.limits.max_texture_dimension_2d = valueU32;
+ } else if (entry.mKey == u"maxTextureDimension3D"_ns) {
+ desc.limits.max_texture_dimension_3d = valueU32;
+ } else if (entry.mKey == u"maxTextureArrayLayers"_ns) {
+ desc.limits.max_texture_array_layers = valueU32;
+ } else if (entry.mKey == u"maxBindGroups"_ns) {
+ desc.limits.max_bind_groups = valueU32;
+ } else if (entry.mKey ==
+ u"maxDynamicUniformBuffersPerPipelineLayout"_ns) {
+ desc.limits.max_dynamic_uniform_buffers_per_pipeline_layout = valueU32;
+ } else if (entry.mKey ==
+ u"maxDynamicStorageBuffersPerPipelineLayout"_ns) {
+ desc.limits.max_dynamic_storage_buffers_per_pipeline_layout = valueU32;
+ } else if (entry.mKey == u"maxSampledTexturesPerShaderStage"_ns) {
+ desc.limits.max_sampled_textures_per_shader_stage = valueU32;
+ } else if (entry.mKey == u"maxSamplersPerShaderStage"_ns) {
+ desc.limits.max_samplers_per_shader_stage = valueU32;
+ } else if (entry.mKey == u"maxStorageBuffersPerShaderStage"_ns) {
+ desc.limits.max_storage_buffers_per_shader_stage = valueU32;
+ } else if (entry.mKey == u"maxStorageTexturesPerShaderStage"_ns) {
+ desc.limits.max_storage_textures_per_shader_stage = valueU32;
+ } else if (entry.mKey == u"maxUniformBuffersPerShaderStage"_ns) {
+ desc.limits.max_uniform_buffers_per_shader_stage = valueU32;
+ } else if (entry.mKey == u"maxUniformBufferBindingSize"_ns) {
+ desc.limits.max_uniform_buffer_binding_size = entry.mValue;
+ } else if (entry.mKey == u"maxStorageBufferBindingSize"_ns) {
+ desc.limits.max_storage_buffer_binding_size = entry.mValue;
+ } else if (entry.mKey == u"minUniformBufferOffsetAlignment"_ns) {
+ desc.limits.min_uniform_buffer_offset_alignment = valueU32;
+ } else if (entry.mKey == u"minStorageBufferOffsetAlignment"_ns) {
+ desc.limits.min_storage_buffer_offset_alignment = valueU32;
+ } else if (entry.mKey == u"maxVertexBuffers"_ns) {
+ desc.limits.max_vertex_buffers = valueU32;
+ } else if (entry.mKey == u"maxVertexAttributes"_ns) {
+ desc.limits.max_vertex_attributes = valueU32;
+ } else if (entry.mKey == u"maxVertexBufferArrayStride"_ns) {
+ desc.limits.max_vertex_buffer_array_stride = valueU32;
+ } else if (entry.mKey == u"maxComputeWorkgroupSizeX"_ns) {
+ desc.limits.max_compute_workgroup_size_x = valueU32;
+ } else if (entry.mKey == u"maxComputeWorkgroupSizeY"_ns) {
+ desc.limits.max_compute_workgroup_size_y = valueU32;
+ } else if (entry.mKey == u"maxComputeWorkgroupSizeZ"_ns) {
+ desc.limits.max_compute_workgroup_size_z = valueU32;
+ } else if (entry.mKey == u"maxComputeWorkgroupsPerDimension"_ns) {
+ desc.limits.max_compute_workgroups_per_dimension = valueU32;
+ } else {
+ NS_WARNING(nsPrintfCString("Requested limit '%s' is not recognized.",
+ NS_ConvertUTF16toUTF8(entry.mKey).get())
+ .get());
+ return Nothing();
+ }
+
+ // TODO: maxInterStageShaderComponents
+ // TODO: maxComputeWorkgroupStorageSize
+ // TODO: maxComputeInvocationsPerWorkgroup
+ }
+ }
+
+ RawId id = ffi::wgpu_client_make_device_id(mClient.get(), aSelfId);
+
+ ByteBuf bb;
+ ffi::wgpu_client_serialize_device_descriptor(&desc, ToFFI(&bb));
+
+ DeviceRequest request;
+ request.mId = id;
+ request.mPromise = SendAdapterRequestDevice(aSelfId, std::move(bb), id);
+ *aLimits = desc.limits;
+
+ return Some(std::move(request));
+}
+
+RawId WebGPUChild::DeviceCreateBuffer(RawId aSelfId,
+ const dom::GPUBufferDescriptor& aDesc,
+ ipc::UnsafeSharedMemoryHandle&& aShmem) {
+ RawId bufferId = ffi::wgpu_client_make_buffer_id(mClient.get(), aSelfId);
+ if (!SendCreateBuffer(aSelfId, bufferId, aDesc, std::move(aShmem))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return bufferId;
+}
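+
+// Note: the UnsafeSharedMemoryHandle passed here backs the buffer's mappable
+// contents; the parent side maps the same region when servicing the
+// BufferMap/BufferUnmap messages declared in PWebGPU.ipdl.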
+
+RawId WebGPUChild::DeviceCreateTexture(RawId aSelfId,
+ const dom::GPUTextureDescriptor& aDesc) {
+ ffi::WGPUTextureDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ if (aDesc.mSize.IsRangeEnforcedUnsignedLongSequence()) {
+ const auto& seq = aDesc.mSize.GetAsRangeEnforcedUnsignedLongSequence();
+ desc.size.width = seq.Length() > 0 ? seq[0] : 1;
+ desc.size.height = seq.Length() > 1 ? seq[1] : 1;
+ desc.size.depth_or_array_layers = seq.Length() > 2 ? seq[2] : 1;
+ } else if (aDesc.mSize.IsGPUExtent3DDict()) {
+ const auto& dict = aDesc.mSize.GetAsGPUExtent3DDict();
+ desc.size.width = dict.mWidth;
+ desc.size.height = dict.mHeight;
+ desc.size.depth_or_array_layers = dict.mDepthOrArrayLayers;
+ } else {
+ MOZ_CRASH("Unexpected union");
+ }
+ desc.mip_level_count = aDesc.mMipLevelCount;
+ desc.sample_count = aDesc.mSampleCount;
+ desc.dimension = ffi::WGPUTextureDimension(aDesc.mDimension);
+ desc.format = ConvertTextureFormat(aDesc.mFormat);
+ desc.usage = aDesc.mUsage;
+
+ ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_texture(mClient.get(), aSelfId, &desc,
+ ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+RawId WebGPUChild::TextureCreateView(
+ RawId aSelfId, RawId aDeviceId,
+ const dom::GPUTextureViewDescriptor& aDesc) {
+ ffi::WGPUTextureViewDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ ffi::WGPUTextureFormat format = {ffi::WGPUTextureFormat_Sentinel};
+ if (aDesc.mFormat.WasPassed()) {
+ format = ConvertTextureFormat(aDesc.mFormat.Value());
+ desc.format = &format;
+ }
+ ffi::WGPUTextureViewDimension dimension =
+ ffi::WGPUTextureViewDimension_Sentinel;
+ if (aDesc.mDimension.WasPassed()) {
+ dimension = ffi::WGPUTextureViewDimension(aDesc.mDimension.Value());
+ desc.dimension = &dimension;
+ }
+
+ desc.aspect = ffi::WGPUTextureAspect(aDesc.mAspect);
+ desc.base_mip_level = aDesc.mBaseMipLevel;
+ desc.mip_level_count =
+ aDesc.mMipLevelCount.WasPassed() ? aDesc.mMipLevelCount.Value() : 0;
+ desc.base_array_layer = aDesc.mBaseArrayLayer;
+ desc.array_layer_count =
+ aDesc.mArrayLayerCount.WasPassed() ? aDesc.mArrayLayerCount.Value() : 0;
+
+ ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_texture_view(mClient.get(), aSelfId, &desc,
+ ToFFI(&bb));
+ if (!SendTextureAction(aSelfId, aDeviceId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+RawId WebGPUChild::DeviceCreateSampler(RawId aSelfId,
+ const dom::GPUSamplerDescriptor& aDesc) {
+ ffi::WGPUSamplerDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+ desc.address_modes[0] = ffi::WGPUAddressMode(aDesc.mAddressModeU);
+ desc.address_modes[1] = ffi::WGPUAddressMode(aDesc.mAddressModeV);
+ desc.address_modes[2] = ffi::WGPUAddressMode(aDesc.mAddressModeW);
+ desc.mag_filter = ffi::WGPUFilterMode(aDesc.mMagFilter);
+ desc.min_filter = ffi::WGPUFilterMode(aDesc.mMinFilter);
+ desc.mipmap_filter = ffi::WGPUFilterMode(aDesc.mMipmapFilter);
+ desc.lod_min_clamp = aDesc.mLodMinClamp;
+ desc.lod_max_clamp = aDesc.mLodMaxClamp;
+
+ ffi::WGPUCompareFunction comparison = ffi::WGPUCompareFunction_Sentinel;
+ if (aDesc.mCompare.WasPassed()) {
+ comparison = ConvertCompareFunction(aDesc.mCompare.Value());
+ desc.compare = &comparison;
+ }
+
+ ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_sampler(mClient.get(), aSelfId, &desc,
+ ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+RawId WebGPUChild::DeviceCreateCommandEncoder(
+ RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc) {
+ ffi::WGPUCommandEncoderDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_command_encoder(mClient.get(), aSelfId,
+ &desc, ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+RawId WebGPUChild::CommandEncoderFinish(
+ RawId aSelfId, RawId aDeviceId,
+ const dom::GPUCommandBufferDescriptor& aDesc) {
+ if (!SendCommandEncoderFinish(aSelfId, aDeviceId, aDesc)) {
+ MOZ_CRASH("IPC failure");
+ }
+ // We rely on knowledge that `CommandEncoderId` == `CommandBufferId`
+ // TODO: refactor this to truly behave as if the encoder is being finished,
+ // and a new command buffer ID is being created from it. Resolve the ID
+ // type aliasing at the place that introduces it: `wgpu-core`.
+ return aSelfId;
+}
+
+RawId WebGPUChild::RenderBundleEncoderFinish(
+ ffi::WGPURenderBundleEncoder& aEncoder, RawId aDeviceId,
+ const dom::GPURenderBundleDescriptor& aDesc) {
+ ffi::WGPURenderBundleDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ ipc::ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_render_bundle(
+ mClient.get(), &aEncoder, aDeviceId, &desc, ToFFI(&bb));
+
+ if (!SendDeviceAction(aDeviceId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+
+ return id;
+}
+
+RawId WebGPUChild::DeviceCreateBindGroupLayout(
+ RawId aSelfId, const dom::GPUBindGroupLayoutDescriptor& aDesc) {
+ struct OptionalData {
+ ffi::WGPUTextureViewDimension dim;
+ ffi::WGPURawTextureSampleType type;
+ ffi::WGPUTextureFormat format;
+ };
+ nsTArray<OptionalData> optional(aDesc.mEntries.Length());
+ for (const auto& entry : aDesc.mEntries) {
+ OptionalData data = {};
+ if (entry.mTexture.WasPassed()) {
+ const auto& texture = entry.mTexture.Value();
+ data.dim = ffi::WGPUTextureViewDimension(texture.mViewDimension);
+ switch (texture.mSampleType) {
+ case dom::GPUTextureSampleType::Float:
+ data.type = ffi::WGPURawTextureSampleType_Float;
+ break;
+ case dom::GPUTextureSampleType::Unfilterable_float:
+ data.type = ffi::WGPURawTextureSampleType_UnfilterableFloat;
+ break;
+ case dom::GPUTextureSampleType::Uint:
+ data.type = ffi::WGPURawTextureSampleType_Uint;
+ break;
+ case dom::GPUTextureSampleType::Sint:
+ data.type = ffi::WGPURawTextureSampleType_Sint;
+ break;
+ case dom::GPUTextureSampleType::Depth:
+ data.type = ffi::WGPURawTextureSampleType_Depth;
+ break;
+ case dom::GPUTextureSampleType::EndGuard_:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ }
+ if (entry.mStorageTexture.WasPassed()) {
+ const auto& texture = entry.mStorageTexture.Value();
+ data.dim = ffi::WGPUTextureViewDimension(texture.mViewDimension);
+ data.format = ConvertTextureFormat(texture.mFormat);
+ }
+ optional.AppendElement(data);
+ }
+
+ nsTArray<ffi::WGPUBindGroupLayoutEntry> entries(aDesc.mEntries.Length());
+ for (size_t i = 0; i < aDesc.mEntries.Length(); ++i) {
+ const auto& entry = aDesc.mEntries[i];
+ ffi::WGPUBindGroupLayoutEntry e = {};
+ e.binding = entry.mBinding;
+ e.visibility = entry.mVisibility;
+ if (entry.mBuffer.WasPassed()) {
+ switch (entry.mBuffer.Value().mType) {
+ case dom::GPUBufferBindingType::Uniform:
+ e.ty = ffi::WGPURawBindingType_UniformBuffer;
+ break;
+ case dom::GPUBufferBindingType::Storage:
+ e.ty = ffi::WGPURawBindingType_StorageBuffer;
+ break;
+ case dom::GPUBufferBindingType::Read_only_storage:
+ e.ty = ffi::WGPURawBindingType_ReadonlyStorageBuffer;
+ break;
+ case dom::GPUBufferBindingType::EndGuard_:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ e.has_dynamic_offset = entry.mBuffer.Value().mHasDynamicOffset;
+ }
+ if (entry.mTexture.WasPassed()) {
+ e.ty = ffi::WGPURawBindingType_SampledTexture;
+ e.view_dimension = &optional[i].dim;
+ e.texture_sample_type = &optional[i].type;
+ e.multisampled = entry.mTexture.Value().mMultisampled;
+ }
+ if (entry.mStorageTexture.WasPassed()) {
+ e.ty = entry.mStorageTexture.Value().mAccess ==
+ dom::GPUStorageTextureAccess::Write_only
+ ? ffi::WGPURawBindingType_WriteonlyStorageTexture
+ : ffi::WGPURawBindingType_ReadonlyStorageTexture;
+ e.view_dimension = &optional[i].dim;
+ e.storage_texture_format = &optional[i].format;
+ }
+ if (entry.mSampler.WasPassed()) {
+ e.ty = ffi::WGPURawBindingType_Sampler;
+ switch (entry.mSampler.Value().mType) {
+ case dom::GPUSamplerBindingType::Filtering:
+ e.sampler_filter = true;
+ break;
+ case dom::GPUSamplerBindingType::Non_filtering:
+ break;
+ case dom::GPUSamplerBindingType::Comparison:
+ e.sampler_compare = true;
+ break;
+ case dom::GPUSamplerBindingType::EndGuard_:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ }
+ entries.AppendElement(e);
+ }
+
+ ffi::WGPUBindGroupLayoutDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+ desc.entries = entries.Elements();
+ desc.entries_length = entries.Length();
+
+ ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_bind_group_layout(mClient.get(), aSelfId,
+ &desc, ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+RawId WebGPUChild::DeviceCreatePipelineLayout(
+ RawId aSelfId, const dom::GPUPipelineLayoutDescriptor& aDesc) {
+ nsTArray<ffi::WGPUBindGroupLayoutId> bindGroupLayouts(
+ aDesc.mBindGroupLayouts.Length());
+ for (const auto& layout : aDesc.mBindGroupLayouts) {
+ if (!layout->IsValid()) {
+ return 0;
+ }
+ bindGroupLayouts.AppendElement(layout->mId);
+ }
+
+ ffi::WGPUPipelineLayoutDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+ desc.bind_group_layouts = bindGroupLayouts.Elements();
+ desc.bind_group_layouts_length = bindGroupLayouts.Length();
+
+ ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_pipeline_layout(mClient.get(), aSelfId,
+ &desc, ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+RawId WebGPUChild::DeviceCreateBindGroup(
+ RawId aSelfId, const dom::GPUBindGroupDescriptor& aDesc) {
+ if (!aDesc.mLayout->IsValid()) {
+ return 0;
+ }
+
+ nsTArray<ffi::WGPUBindGroupEntry> entries(aDesc.mEntries.Length());
+ for (const auto& entry : aDesc.mEntries) {
+ ffi::WGPUBindGroupEntry e = {};
+ e.binding = entry.mBinding;
+ if (entry.mResource.IsGPUBufferBinding()) {
+ const auto& bufBinding = entry.mResource.GetAsGPUBufferBinding();
+ e.buffer = bufBinding.mBuffer->mId;
+ e.offset = bufBinding.mOffset;
+ e.size = bufBinding.mSize.WasPassed() ? bufBinding.mSize.Value() : 0;
+ }
+ if (entry.mResource.IsGPUTextureView()) {
+ e.texture_view = entry.mResource.GetAsGPUTextureView()->mId;
+ }
+ if (entry.mResource.IsGPUSampler()) {
+ e.sampler = entry.mResource.GetAsGPUSampler()->mId;
+ }
+ entries.AppendElement(e);
+ }
+
+ ffi::WGPUBindGroupDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+ desc.layout = aDesc.mLayout->mId;
+ desc.entries = entries.Elements();
+ desc.entries_length = entries.Length();
+
+ ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_bind_group(mClient.get(), aSelfId, &desc,
+ ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+already_AddRefed<ShaderModule> WebGPUChild::DeviceCreateShaderModule(
+ Device* aDevice, const dom::GPUShaderModuleDescriptor& aDesc,
+ RefPtr<dom::Promise> aPromise) {
+ RawId deviceId = aDevice->mId;
+ RawId moduleId =
+ ffi::wgpu_client_make_shader_module_id(mClient.get(), deviceId);
+
+ RefPtr<ShaderModule> shaderModule =
+ new ShaderModule(aDevice, moduleId, aPromise);
+
+ nsString noLabel;
+ const nsString& label =
+ aDesc.mLabel.WasPassed() ? aDesc.mLabel.Value() : noLabel;
+ SendDeviceCreateShaderModule(deviceId, moduleId, label, aDesc.mCode)
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [aPromise,
+ shaderModule](nsTArray<WebGPUCompilationMessage>&& messages) {
+ RefPtr<CompilationInfo> infoObject(
+ new CompilationInfo(shaderModule));
+ infoObject->SetMessages(messages);
+ aPromise->MaybeResolve(infoObject);
+ },
+ [aPromise](const ipc::ResponseRejectReason& aReason) {
+ aPromise->MaybeRejectWithNotSupportedError("IPC error");
+ });
+
+ return shaderModule.forget();
+}
+
+RawId WebGPUChild::DeviceCreateComputePipelineImpl(
+ PipelineCreationContext* const aContext,
+ const dom::GPUComputePipelineDescriptor& aDesc, ByteBuf* const aByteBuf) {
+ ffi::WGPUComputePipelineDescriptor desc = {};
+ nsCString label, entryPoint;
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+ if (aDesc.mLayout.WasPassed()) {
+ desc.layout = aDesc.mLayout.Value().mId;
+ }
+ desc.stage.module = aDesc.mCompute.mModule->mId;
+ LossyCopyUTF16toASCII(aDesc.mCompute.mEntryPoint, entryPoint);
+ desc.stage.entry_point = entryPoint.get();
+
+ RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {};
+ RawId id = ffi::wgpu_client_create_compute_pipeline(
+ mClient.get(), aContext->mParentId, &desc, ToFFI(aByteBuf),
+ &aContext->mImplicitPipelineLayoutId, implicit_bgl_ids);
+
+ for (const auto& cur : implicit_bgl_ids) {
+ if (!cur) break;
+ aContext->mImplicitBindGroupLayoutIds.AppendElement(cur);
+ }
+
+ return id;
+}
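+
+// If the descriptor carried no explicit layout, wgpu-core creates one
+// implicitly, so the client pre-allocates IDs for that pipeline layout and
+// its bind group layouts. Unused implicit_bgl_ids slots stay 0, which is
+// why the loop above stops at the first null ID.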
+
+RawId WebGPUChild::DeviceCreateComputePipeline(
+ PipelineCreationContext* const aContext,
+ const dom::GPUComputePipelineDescriptor& aDesc) {
+ ByteBuf bb;
+ const RawId id = DeviceCreateComputePipelineImpl(aContext, aDesc, &bb);
+
+ if (!SendDeviceAction(aContext->mParentId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+RefPtr<PipelinePromise> WebGPUChild::DeviceCreateComputePipelineAsync(
+ PipelineCreationContext* const aContext,
+ const dom::GPUComputePipelineDescriptor& aDesc) {
+ ByteBuf bb;
+ const RawId id = DeviceCreateComputePipelineImpl(aContext, aDesc, &bb);
+
+ return SendDeviceActionWithAck(aContext->mParentId, std::move(bb))
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [id](bool aDummy) {
+ Unused << aDummy;
+ return PipelinePromise::CreateAndResolve(id, __func__);
+ },
+ [](const ipc::ResponseRejectReason& aReason) {
+ return PipelinePromise::CreateAndReject(aReason, __func__);
+ });
+}
+
+static ffi::WGPUMultisampleState ConvertMultisampleState(
+ const dom::GPUMultisampleState& aDesc) {
+ ffi::WGPUMultisampleState desc = {};
+ desc.count = aDesc.mCount;
+ desc.mask = aDesc.mMask;
+ desc.alpha_to_coverage_enabled = aDesc.mAlphaToCoverageEnabled;
+ return desc;
+}
+
+static ffi::WGPUBlendComponent ConvertBlendComponent(
+ const dom::GPUBlendComponent& aDesc) {
+ ffi::WGPUBlendComponent desc = {};
+ desc.src_factor = ffi::WGPUBlendFactor(aDesc.mSrcFactor);
+ desc.dst_factor = ffi::WGPUBlendFactor(aDesc.mDstFactor);
+ desc.operation = ffi::WGPUBlendOperation(aDesc.mOperation);
+ return desc;
+}
+
+static ffi::WGPUStencilFaceState ConvertStencilFaceState(
+ const dom::GPUStencilFaceState& aDesc) {
+ ffi::WGPUStencilFaceState desc = {};
+ desc.compare = ConvertCompareFunction(aDesc.mCompare);
+ desc.fail_op = ffi::WGPUStencilOperation(aDesc.mFailOp);
+ desc.depth_fail_op = ffi::WGPUStencilOperation(aDesc.mDepthFailOp);
+ desc.pass_op = ffi::WGPUStencilOperation(aDesc.mPassOp);
+ return desc;
+}
+
+static ffi::WGPUDepthStencilState ConvertDepthStencilState(
+ const dom::GPUDepthStencilState& aDesc) {
+ ffi::WGPUDepthStencilState desc = {};
+ desc.format = ConvertTextureFormat(aDesc.mFormat);
+ desc.depth_write_enabled = aDesc.mDepthWriteEnabled;
+ desc.depth_compare = ConvertCompareFunction(aDesc.mDepthCompare);
+ desc.stencil.front = ConvertStencilFaceState(aDesc.mStencilFront);
+ desc.stencil.back = ConvertStencilFaceState(aDesc.mStencilBack);
+ desc.stencil.read_mask = aDesc.mStencilReadMask;
+ desc.stencil.write_mask = aDesc.mStencilWriteMask;
+ desc.bias.constant = aDesc.mDepthBias;
+ desc.bias.slope_scale = aDesc.mDepthBiasSlopeScale;
+ desc.bias.clamp = aDesc.mDepthBiasClamp;
+ return desc;
+}
+
+RawId WebGPUChild::DeviceCreateRenderPipelineImpl(
+ PipelineCreationContext* const aContext,
+ const dom::GPURenderPipelineDescriptor& aDesc, ByteBuf* const aByteBuf) {
+ // A bunch of stack locals that we can have pointers into
+ nsTArray<ffi::WGPUVertexBufferLayout> vertexBuffers;
+ nsTArray<ffi::WGPUVertexAttribute> vertexAttributes;
+ ffi::WGPURenderPipelineDescriptor desc = {};
+ nsCString vsEntry, fsEntry;
+ ffi::WGPUIndexFormat stripIndexFormat = ffi::WGPUIndexFormat_Uint16;
+ ffi::WGPUFace cullFace = ffi::WGPUFace_Front;
+ ffi::WGPUVertexState vertexState = {};
+ ffi::WGPUFragmentState fragmentState = {};
+ nsTArray<ffi::WGPUColorTargetState> colorStates;
+ nsTArray<ffi::WGPUBlendState> blendStates;
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ if (aDesc.mLayout.WasPassed()) {
+ desc.layout = aDesc.mLayout.Value().mId;
+ }
+
+ {
+ const auto& stage = aDesc.mVertex;
+ vertexState.stage.module = stage.mModule->mId;
+ LossyCopyUTF16toASCII(stage.mEntryPoint, vsEntry);
+ vertexState.stage.entry_point = vsEntry.get();
+
+ for (const auto& vertex_desc : stage.mBuffers) {
+ ffi::WGPUVertexBufferLayout vb_desc = {};
+ if (!vertex_desc.IsNull()) {
+ const auto& vd = vertex_desc.Value();
+ vb_desc.array_stride = vd.mArrayStride;
+ vb_desc.step_mode = ffi::WGPUVertexStepMode(vd.mStepMode);
+ // Note: we are setting the length but not the pointer
+ vb_desc.attributes_length = vd.mAttributes.Length();
+ for (const auto& vat : vd.mAttributes) {
+ ffi::WGPUVertexAttribute ad = {};
+ ad.offset = vat.mOffset;
+ ad.format = ffi::WGPUVertexFormat(vat.mFormat);
+ ad.shader_location = vat.mShaderLocation;
+ vertexAttributes.AppendElement(ad);
+ }
+ }
+ vertexBuffers.AppendElement(vb_desc);
+ }
+ // Now patch up all the pointers to attribute lists.
+ size_t numAttributes = 0;
+ for (auto& vb_desc : vertexBuffers) {
+ vb_desc.attributes = vertexAttributes.Elements() + numAttributes;
+ numAttributes += vb_desc.attributes_length;
+ }
+
+ vertexState.buffers = vertexBuffers.Elements();
+ vertexState.buffers_length = vertexBuffers.Length();
+ desc.vertex = &vertexState;
+ }
+
+ if (aDesc.mFragment.WasPassed()) {
+ const auto& stage = aDesc.mFragment.Value();
+ fragmentState.stage.module = stage.mModule->mId;
+ LossyCopyUTF16toASCII(stage.mEntryPoint, fsEntry);
+ fragmentState.stage.entry_point = fsEntry.get();
+
+ // Note: we pre-collect the blend states into a different array
+ // so that we can have non-stale pointers into it.
+ for (const auto& colorState : stage.mTargets) {
+ ffi::WGPUColorTargetState desc = {};
+ desc.format = ConvertTextureFormat(colorState.mFormat);
+ desc.write_mask = colorState.mWriteMask;
+ colorStates.AppendElement(desc);
+ ffi::WGPUBlendState bs = {};
+ if (colorState.mBlend.WasPassed()) {
+ const auto& blend = colorState.mBlend.Value();
+ bs.alpha = ConvertBlendComponent(blend.mAlpha);
+ bs.color = ConvertBlendComponent(blend.mColor);
+ }
+ blendStates.AppendElement(bs);
+ }
+ for (size_t i = 0; i < colorStates.Length(); ++i) {
+ if (stage.mTargets[i].mBlend.WasPassed()) {
+ colorStates[i].blend = &blendStates[i];
+ }
+ }
+
+ fragmentState.targets = colorStates.Elements();
+ fragmentState.targets_length = colorStates.Length();
+ desc.fragment = &fragmentState;
+ }
+
+ {
+ const auto& prim = aDesc.mPrimitive;
+ desc.primitive.topology = ffi::WGPUPrimitiveTopology(prim.mTopology);
+ if (prim.mStripIndexFormat.WasPassed()) {
+ stripIndexFormat = ffi::WGPUIndexFormat(prim.mStripIndexFormat.Value());
+ desc.primitive.strip_index_format = &stripIndexFormat;
+ }
+ desc.primitive.front_face = ffi::WGPUFrontFace(prim.mFrontFace);
+ if (prim.mCullMode != dom::GPUCullMode::None) {
+ cullFace = prim.mCullMode == dom::GPUCullMode::Front ? ffi::WGPUFace_Front
+ : ffi::WGPUFace_Back;
+ desc.primitive.cull_mode = &cullFace;
+ }
+ }
+ desc.multisample = ConvertMultisampleState(aDesc.mMultisample);
+
+ ffi::WGPUDepthStencilState depthStencilState = {};
+ if (aDesc.mDepthStencil.WasPassed()) {
+ depthStencilState = ConvertDepthStencilState(aDesc.mDepthStencil.Value());
+ desc.depth_stencil = &depthStencilState;
+ }
+
+ RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {};
+ RawId id = ffi::wgpu_client_create_render_pipeline(
+ mClient.get(), aContext->mParentId, &desc, ToFFI(aByteBuf),
+ &aContext->mImplicitPipelineLayoutId, implicit_bgl_ids);
+
+ for (const auto& cur : implicit_bgl_ids) {
+ if (!cur) break;
+ aContext->mImplicitBindGroupLayoutIds.AppendElement(cur);
+ }
+
+ return id;
+}
+
+RawId WebGPUChild::DeviceCreateRenderPipeline(
+ PipelineCreationContext* const aContext,
+ const dom::GPURenderPipelineDescriptor& aDesc) {
+ ByteBuf bb;
+ const RawId id = DeviceCreateRenderPipelineImpl(aContext, aDesc, &bb);
+
+ if (!SendDeviceAction(aContext->mParentId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+RefPtr<PipelinePromise> WebGPUChild::DeviceCreateRenderPipelineAsync(
+ PipelineCreationContext* const aContext,
+ const dom::GPURenderPipelineDescriptor& aDesc) {
+ ByteBuf bb;
+ const RawId id = DeviceCreateRenderPipelineImpl(aContext, aDesc, &bb);
+
+ return SendDeviceActionWithAck(aContext->mParentId, std::move(bb))
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [id](bool aDummy) {
+ Unused << aDummy;
+ return PipelinePromise::CreateAndResolve(id, __func__);
+ },
+ [](const ipc::ResponseRejectReason& aReason) {
+ return PipelinePromise::CreateAndReject(aReason, __func__);
+ });
+}
+
+ipc::IPCResult WebGPUChild::RecvDeviceUncapturedError(
+ RawId aDeviceId, const nsACString& aMessage) {
+ auto targetIter = mDeviceMap.find(aDeviceId);
+ if (!aDeviceId || targetIter == mDeviceMap.end()) {
+ JsWarning(nullptr, aMessage);
+ } else {
+ auto* target = targetIter->second.get();
+ MOZ_ASSERT(target);
+ // We don't want to spam the errors to the console indefinitely
+ if (target->CheckNewWarning(aMessage)) {
+ JsWarning(target->GetOwnerGlobal(), aMessage);
+
+ dom::GPUUncapturedErrorEventInit init;
+ init.mError.SetAsGPUValidationError() =
+ new ValidationError(target->GetParentObject(), aMessage);
+ RefPtr<mozilla::dom::GPUUncapturedErrorEvent> event =
+ dom::GPUUncapturedErrorEvent::Constructor(
+ target, u"uncapturederror"_ns, init);
+ target->DispatchEvent(*event);
+ }
+ }
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUChild::RecvDropAction(const ipc::ByteBuf& aByteBuf) {
+ const auto* byteBuf = ToFFI(&aByteBuf);
+ ffi::wgpu_client_drop_action(mClient.get(), byteBuf);
+ return IPC_OK();
+}
+
+void WebGPUChild::DeviceCreateSwapChain(
+ RawId aSelfId, const RGBDescriptor& aRgbDesc, size_t maxBufferCount,
+ const layers::RemoteTextureOwnerId& aOwnerId) {
+ RawId queueId = aSelfId; // TODO: multiple queues
+ nsTArray<RawId> bufferIds(maxBufferCount);
+ for (size_t i = 0; i < maxBufferCount; ++i) {
+ bufferIds.AppendElement(
+ ffi::wgpu_client_make_buffer_id(mClient.get(), aSelfId));
+ }
+ SendDeviceCreateSwapChain(aSelfId, queueId, aRgbDesc, bufferIds, aOwnerId);
+}
+
+void WebGPUChild::SwapChainPresent(RawId aTextureId,
+ const RemoteTextureId& aRemoteTextureId,
+ const RemoteTextureOwnerId& aOwnerId) {
+ // Hack: the function expects `DeviceId`, but it only uses it for `backend()`
+ // selection.
+ RawId encoderId = ffi::wgpu_client_make_encoder_id(mClient.get(), aTextureId);
+ SendSwapChainPresent(aTextureId, encoderId, aRemoteTextureId, aOwnerId);
+}
+
+void WebGPUChild::RegisterDevice(Device* const aDevice) {
+ mDeviceMap.insert({aDevice->mId, aDevice});
+}
+
+void WebGPUChild::UnregisterDevice(RawId aId) {
+ mDeviceMap.erase(aId);
+ if (IsOpen()) {
+ SendDeviceDestroy(aId);
+ }
+}
+
+void WebGPUChild::FreeUnregisteredInParentDevice(RawId aId) {
+ ffi::wgpu_client_kill_device_id(mClient.get(), aId);
+ mDeviceMap.erase(aId);
+}
+
+void WebGPUChild::ActorDestroy(ActorDestroyReason) {
+ // Resolving the promise could cause us to update the original map if the
+ // callee frees the Device objects immediately. Since any remaining entries
+ // in the map are no longer valid, we can just move the map onto the stack.
+ const auto deviceMap = std::move(mDeviceMap);
+ mDeviceMap.clear();
+
+ for (const auto& targetIter : deviceMap) {
+ RefPtr<Device> device = targetIter.second.get();
+ if (!device) {
+ // The Device may have gotten freed when we resolved the Promise for
+ // another Device in the map.
+ continue;
+ }
+
+ RefPtr<dom::Promise> promise = device->MaybeGetLost();
+ if (!promise) {
+ continue;
+ }
+
+ auto info = MakeRefPtr<DeviceLostInfo>(device->GetParentObject(),
+ u"WebGPUChild destroyed"_ns);
+
+ // We have strong references to both the Device and the DeviceLostInfo and
+ // the Promise objects on the stack which keeps them alive for long enough.
+ promise->MaybeResolve(info);
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/ipc/WebGPUChild.h b/dom/webgpu/ipc/WebGPUChild.h
new file mode 100644
index 0000000000..7ab993239c
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUChild.h
@@ -0,0 +1,146 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_CHILD_H_
+#define WEBGPU_CHILD_H_
+
+#include "mozilla/webgpu/PWebGPUChild.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/WeakPtr.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla {
+namespace ipc {
+class UnsafeSharedMemoryHandle;
+} // namespace ipc
+namespace dom {
+struct GPURequestAdapterOptions;
+} // namespace dom
+namespace layers {
+class CompositorBridgeChild;
+} // namespace layers
+namespace webgpu {
+namespace ffi {
+struct WGPUClient;
+struct WGPULimits;
+struct WGPUTextureViewDescriptor;
+} // namespace ffi
+
+using AdapterPromise =
+ MozPromise<ipc::ByteBuf, Maybe<ipc::ResponseRejectReason>, true>;
+using PipelinePromise = MozPromise<RawId, ipc::ResponseRejectReason, true>;
+using DevicePromise = MozPromise<bool, ipc::ResponseRejectReason, true>;
+
+struct PipelineCreationContext {
+ RawId mParentId = 0;
+ RawId mImplicitPipelineLayoutId = 0;
+ nsTArray<RawId> mImplicitBindGroupLayoutIds;
+};
+
+struct DeviceRequest {
+ RawId mId = 0;
+ RefPtr<DevicePromise> mPromise;
+ // Note: we could put `ffi::WGPULimits` in here as well,
+ // but we don't want to #include ffi stuff in this header
+};
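+
+// Sketch of the intended calling pattern (illustrative only):
+//
+//   ffi::WGPULimits limits = {};
+//   Maybe<DeviceRequest> request = AdapterRequestDevice(adapterId, desc, &limits);
+//   if (request) {
+//     request->mPromise->Then(/* true on success; the device id is request->mId */);
+//   }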
+
+ffi::WGPUByteBuf* ToFFI(ipc::ByteBuf* x);
+
+class WebGPUChild final : public PWebGPUChild, public SupportsWeakPtr {
+ public:
+ friend class layers::CompositorBridgeChild;
+
+ NS_DECL_CYCLE_COLLECTION_NATIVE_CLASS(WebGPUChild)
+ NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING_INHERITED(WebGPUChild)
+
+ public:
+ explicit WebGPUChild();
+
+ bool IsOpen() const { return CanSend(); }
+
+ RefPtr<AdapterPromise> InstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions);
+ Maybe<DeviceRequest> AdapterRequestDevice(
+ RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc,
+ ffi::WGPULimits* aLimits);
+ RawId DeviceCreateBuffer(RawId aSelfId, const dom::GPUBufferDescriptor& aDesc,
+ ipc::UnsafeSharedMemoryHandle&& aShmem);
+ RawId DeviceCreateTexture(RawId aSelfId,
+ const dom::GPUTextureDescriptor& aDesc);
+ RawId TextureCreateView(RawId aSelfId, RawId aDeviceId,
+ const dom::GPUTextureViewDescriptor& aDesc);
+ RawId DeviceCreateSampler(RawId aSelfId,
+ const dom::GPUSamplerDescriptor& aDesc);
+ RawId DeviceCreateCommandEncoder(
+ RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc);
+ RawId CommandEncoderFinish(RawId aSelfId, RawId aDeviceId,
+ const dom::GPUCommandBufferDescriptor& aDesc);
+ RawId RenderBundleEncoderFinish(ffi::WGPURenderBundleEncoder& aEncoder,
+ RawId aDeviceId,
+ const dom::GPURenderBundleDescriptor& aDesc);
+ RawId DeviceCreateBindGroupLayout(
+ RawId aSelfId, const dom::GPUBindGroupLayoutDescriptor& aDesc);
+ RawId DeviceCreatePipelineLayout(
+ RawId aSelfId, const dom::GPUPipelineLayoutDescriptor& aDesc);
+ RawId DeviceCreateBindGroup(RawId aSelfId,
+ const dom::GPUBindGroupDescriptor& aDesc);
+ RawId DeviceCreateComputePipeline(
+ PipelineCreationContext* const aContext,
+ const dom::GPUComputePipelineDescriptor& aDesc);
+ RefPtr<PipelinePromise> DeviceCreateComputePipelineAsync(
+ PipelineCreationContext* const aContext,
+ const dom::GPUComputePipelineDescriptor& aDesc);
+ RawId DeviceCreateRenderPipeline(
+ PipelineCreationContext* const aContext,
+ const dom::GPURenderPipelineDescriptor& aDesc);
+ RefPtr<PipelinePromise> DeviceCreateRenderPipelineAsync(
+ PipelineCreationContext* const aContext,
+ const dom::GPURenderPipelineDescriptor& aDesc);
+ already_AddRefed<ShaderModule> DeviceCreateShaderModule(
+ Device* aDevice, const dom::GPUShaderModuleDescriptor& aDesc,
+ RefPtr<dom::Promise> aPromise);
+
+ void DeviceCreateSwapChain(RawId aSelfId, const RGBDescriptor& aRgbDesc,
+ size_t maxBufferCount,
+ const layers::RemoteTextureOwnerId& aOwnerId);
+ void SwapChainPresent(RawId aTextureId,
+ const RemoteTextureId& aRemoteTextureId,
+ const RemoteTextureOwnerId& aOwnerId);
+
+ void RegisterDevice(Device* const aDevice);
+ void UnregisterDevice(RawId aId);
+ void FreeUnregisteredInParentDevice(RawId aId);
+
+ static void ConvertTextureFormatRef(const dom::GPUTextureFormat& aInput,
+ ffi::WGPUTextureFormat& aOutput);
+
+ private:
+ virtual ~WebGPUChild();
+
+ void JsWarning(nsIGlobalObject* aGlobal, const nsACString& aMessage);
+
+ RawId DeviceCreateComputePipelineImpl(
+ PipelineCreationContext* const aContext,
+ const dom::GPUComputePipelineDescriptor& aDesc,
+ ipc::ByteBuf* const aByteBuf);
+ RawId DeviceCreateRenderPipelineImpl(
+ PipelineCreationContext* const aContext,
+ const dom::GPURenderPipelineDescriptor& aDesc,
+ ipc::ByteBuf* const aByteBuf);
+
+ UniquePtr<ffi::WGPUClient> const mClient;
+ std::unordered_map<RawId, WeakPtr<Device>> mDeviceMap;
+
+ public:
+ ipc::IPCResult RecvDeviceUncapturedError(RawId aDeviceId,
+ const nsACString& aMessage);
+ ipc::IPCResult RecvDropAction(const ipc::ByteBuf& aByteBuf);
+ void ActorDestroy(ActorDestroyReason) override;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // WEBGPU_CHILD_H_
diff --git a/dom/webgpu/ipc/WebGPUParent.cpp b/dom/webgpu/ipc/WebGPUParent.cpp
new file mode 100644
index 0000000000..f3f9515319
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUParent.cpp
@@ -0,0 +1,1116 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebGPUParent.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "mozilla/layers/CompositorThread.h"
+#include "mozilla/layers/ImageDataSerializer.h"
+#include "mozilla/layers/RemoteTextureMap.h"
+#include "mozilla/layers/TextureHost.h"
+#include "mozilla/layers/WebRenderImageHost.h"
+#include "mozilla/layers/WebRenderTextureHost.h"
+
+namespace mozilla::webgpu {
+
+const uint64_t POLL_TIME_MS = 100;
+
+static mozilla::LazyLogModule sLogger("WebGPU");
+
+// A fixed-capacity buffer for receiving textual error messages from
+// `wgpu_bindings`.
+//
+// The `ToFFI` method returns an `ffi::WGPUErrorBuffer` pointing to our
+// buffer, for you to pass to fallible FFI-visible `wgpu_bindings`
+// functions. These indicate failure by storing an error message in the
+// buffer, which you can retrieve by calling `GetError`.
+//
+// If you call `ToFFI` on this type, you must also call `GetError` to check for
+// an error. Otherwise, the destructor asserts.
+//
+// TODO: refactor this to avoid stack-allocating the buffer all the time.
+class ErrorBuffer {
+ // if the message doesn't fit, it will be truncated
+ static constexpr unsigned BUFFER_SIZE = 512;
+ char mUtf8[BUFFER_SIZE] = {};
+ bool mGuard = false;
+
+ public:
+ ErrorBuffer() { mUtf8[0] = 0; }
+ ErrorBuffer(const ErrorBuffer&) = delete;
+ ~ErrorBuffer() { MOZ_ASSERT(!mGuard); }
+
+ ffi::WGPUErrorBuffer ToFFI() {
+ mGuard = true;
+ ffi::WGPUErrorBuffer errorBuf = {mUtf8, BUFFER_SIZE};
+ return errorBuf;
+ }
+
+ // If an error message was stored in this buffer, return Some(m)
+ // where m is the message as a UTF-8 nsCString. Otherwise, return Nothing.
+ //
+ // Mark this ErrorBuffer as having been handled, so its destructor
+ // won't assert.
+ Maybe<nsCString> GetError() {
+ mGuard = false;
+ if (!mUtf8[0]) {
+ return Nothing();
+ }
+ return Some(nsCString(mUtf8));
+ }
+};
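+
+// Typical usage in this file (sketch):
+//
+//   ErrorBuffer error;
+//   ffi::wgpu_server_adapter_request_device(..., error.ToFFI());
+//   ForwardError(deviceId, error); // calls GetError(), disarming the destructor assert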
+
+class PresentationData {
+ NS_INLINE_DECL_REFCOUNTING(PresentationData);
+
+ public:
+ RawId mDeviceId = 0;
+ RawId mQueueId = 0;
+ layers::RGBDescriptor mDesc;
+ uint32_t mSourcePitch = 0;
+ int32_t mNextFrameID = 1;
+ std::vector<RawId> mUnassignedBufferIds;
+ std::vector<RawId> mAvailableBufferIds;
+ std::vector<RawId> mQueuedBufferIds;
+ Mutex mBuffersLock MOZ_UNANNOTATED;
+
+ PresentationData(RawId aDeviceId, RawId aQueueId,
+ const layers::RGBDescriptor& aDesc, uint32_t aSourcePitch,
+ const nsTArray<RawId>& aBufferIds)
+ : mDeviceId(aDeviceId),
+ mQueueId(aQueueId),
+ mDesc(aDesc),
+ mSourcePitch(aSourcePitch),
+ mBuffersLock("WebGPU presentation buffers") {
+ MOZ_COUNT_CTOR(PresentationData);
+
+ for (const RawId id : aBufferIds) {
+ mUnassignedBufferIds.push_back(id);
+ }
+ }
+
+ private:
+ ~PresentationData() { MOZ_COUNT_DTOR(PresentationData); }
+};
+
+static void FreeAdapter(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_adapter_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free adapter");
+ }
+}
+static void FreeDevice(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_device_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free device");
+ }
+}
+static void FreeShaderModule(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_shader_module_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free shader module");
+ }
+}
+static void FreePipelineLayout(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_pipeline_layout_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free pipeline layout");
+ }
+}
+static void FreeBindGroupLayout(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_bind_group_layout_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free bind group layout");
+ }
+}
+static void FreeBindGroup(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_bind_group_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free bind group");
+ }
+}
+static void FreeCommandBuffer(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_command_buffer_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free command buffer");
+ }
+}
+static void FreeRenderBundle(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_render_bundle_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free render bundle");
+ }
+}
+static void FreeRenderPipeline(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_render_pipeline_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free render pipeline");
+ }
+}
+static void FreeComputePipeline(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_compute_pipeline_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free compute pipeline");
+ }
+}
+static void FreeBuffer(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_buffer_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free buffer");
+ }
+}
+static void FreeTexture(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_texture_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free texture");
+ }
+}
+static void FreeTextureView(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_texture_view_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free texture view");
+ }
+}
+static void FreeSampler(RawId id, void* param) {
+ ipc::ByteBuf byteBuf;
+ wgpu_server_sampler_free(id, ToFFI(&byteBuf));
+ if (!static_cast<WebGPUParent*>(param)->SendDropAction(std::move(byteBuf))) {
+ NS_ERROR("Unable to free sampler");
+ }
+}
+static void FreeSurface(RawId id, void* param) {
+ Unused << id;
+ Unused << param;
+}
+
+static ffi::WGPUIdentityRecyclerFactory MakeFactory(void* param) {
+ ffi::WGPUIdentityRecyclerFactory factory;
+ PodZero(&factory);
+ factory.param = param;
+ factory.free_adapter = FreeAdapter;
+ factory.free_device = FreeDevice;
+ factory.free_pipeline_layout = FreePipelineLayout;
+ factory.free_shader_module = FreeShaderModule;
+ factory.free_bind_group_layout = FreeBindGroupLayout;
+ factory.free_bind_group = FreeBindGroup;
+ factory.free_command_buffer = FreeCommandBuffer;
+ factory.free_render_bundle = FreeRenderBundle;
+ factory.free_render_pipeline = FreeRenderPipeline;
+ factory.free_compute_pipeline = FreeComputePipeline;
+ factory.free_buffer = FreeBuffer;
+ factory.free_texture = FreeTexture;
+ factory.free_texture_view = FreeTextureView;
+ factory.free_sampler = FreeSampler;
+ factory.free_surface = FreeSurface;
+ return factory;
+}
+
+WebGPUParent::WebGPUParent()
+ : mContext(ffi::wgpu_server_new(MakeFactory(this))) {
+ mTimer.Start(base::TimeDelta::FromMilliseconds(POLL_TIME_MS), this,
+ &WebGPUParent::MaintainDevices);
+}
+
+WebGPUParent::~WebGPUParent() = default;
+
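+// Invoked on a timer (every POLL_TIME_MS) so wgpu can make progress on
+// pending device work; the `false` argument asks for a non-blocking poll, as
+// opposed to the draining poll in ActorDestroy.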
+void WebGPUParent::MaintainDevices() {
+ ffi::wgpu_server_poll_all_devices(mContext.get(), false);
+}
+
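+// Forwards the error in aError, if any, to the device's error scope stack or
+// its uncaptured-error handler. Returns true if an error was present.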
+bool WebGPUParent::ForwardError(RawId aDeviceId, ErrorBuffer& aError) {
+ // don't do anything if the error is empty
+ auto cString = aError.GetError();
+ if (!cString) {
+ return false;
+ }
+
+ ReportError(aDeviceId, cString.value());
+
+ return true;
+}
+
+// Generate an error on the Device timeline of aDeviceId.
+// aMessage is interpreted as UTF-8.
+void WebGPUParent::ReportError(RawId aDeviceId, const nsCString& aMessage) {
+ // find the appropriate error scope
+ const auto& lookup = mErrorScopeMap.find(aDeviceId);
+ if (lookup != mErrorScopeMap.end() && !lookup->second.mStack.IsEmpty()) {
+ auto& last = lookup->second.mStack.LastElement();
+ if (last.isNothing()) {
+ last.emplace(ScopedError{false, aMessage});
+ }
+ } else {
+ // fall back to the uncaptured error handler
+ if (!SendDeviceUncapturedError(aDeviceId, aMessage)) {
+ NS_ERROR("Unable to SendError");
+ }
+ }
+}
+
+ipc::IPCResult WebGPUParent::RecvInstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions,
+ const nsTArray<RawId>& aTargetIds,
+ InstanceRequestAdapterResolver&& resolver) {
+ ffi::WGPURequestAdapterOptions options = {};
+ if (aOptions.mPowerPreference.WasPassed()) {
+ options.power_preference = static_cast<ffi::WGPUPowerPreference>(
+ aOptions.mPowerPreference.Value());
+ }
+ options.force_fallback_adapter = aOptions.mForceFallbackAdapter;
+ // TODO: make available backends configurable by prefs
+
+ ErrorBuffer error;
+ int8_t index = ffi::wgpu_server_instance_request_adapter(
+ mContext.get(), &options, aTargetIds.Elements(), aTargetIds.Length(),
+ error.ToFFI());
+
+ ByteBuf infoByteBuf;
+ // Rust side expects an `Option`, so 0 maps to `None`.
+ uint64_t adapterId = 0;
+ if (index >= 0) {
+ adapterId = aTargetIds[index];
+ }
+ ffi::wgpu_server_adapter_pack_info(mContext.get(), adapterId,
+ ToFFI(&infoByteBuf));
+ resolver(std::move(infoByteBuf));
+ ForwardError(0, error);
+
+ // free the unused IDs
+ ipc::ByteBuf dropByteBuf;
+ for (size_t i = 0; i < aTargetIds.Length(); ++i) {
+ if (static_cast<int8_t>(i) != index) {
+ wgpu_server_adapter_free(aTargetIds[i], ToFFI(&dropByteBuf));
+ }
+ }
+ if (dropByteBuf.mData && !SendDropAction(std::move(dropByteBuf))) {
+ NS_ERROR("Unable to free free unused adapter IDs");
+ }
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice(
+ RawId aAdapterId, const ipc::ByteBuf& aByteBuf, RawId aDeviceId,
+ AdapterRequestDeviceResolver&& resolver) {
+ ErrorBuffer error;
+ ffi::wgpu_server_adapter_request_device(
+ mContext.get(), aAdapterId, ToFFI(&aByteBuf), aDeviceId, error.ToFFI());
+ if (ForwardError(0, error)) {
+ resolver(false);
+ } else {
+    mErrorScopeMap.insert({aDeviceId, ErrorScopeStack()});
+ resolver(true);
+ }
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvAdapterDestroy(RawId aAdapterId) {
+ ffi::wgpu_server_adapter_drop(mContext.get(), aAdapterId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceDestroy(RawId aDeviceId) {
+ ffi::wgpu_server_device_drop(mContext.get(), aDeviceId);
+ mErrorScopeMap.erase(aDeviceId);
+ return IPC_OK();
+}
+
+WebGPUParent::BufferMapData* WebGPUParent::GetBufferMapData(RawId aBufferId) {
+ const auto iter = mSharedMemoryMap.find(aBufferId);
+ if (iter == mSharedMemoryMap.end()) {
+ return nullptr;
+ }
+
+ return &iter->second;
+}
+
+ipc::IPCResult WebGPUParent::RecvCreateBuffer(
+ RawId aDeviceId, RawId aBufferId, dom::GPUBufferDescriptor&& aDesc,
+ ipc::UnsafeSharedMemoryHandle&& aShmem) {
+ webgpu::StringHelper label(aDesc.mLabel);
+
+ auto shmem =
+ ipc::WritableSharedMemoryMapping::Open(std::move(aShmem)).value();
+
+ bool hasMapFlags = aDesc.mUsage & (dom::GPUBufferUsage_Binding::MAP_WRITE |
+ dom::GPUBufferUsage_Binding::MAP_READ);
+ if (hasMapFlags || aDesc.mMappedAtCreation) {
+ uint64_t offset = 0;
+ uint64_t size = 0;
+ if (aDesc.mMappedAtCreation) {
+ size = aDesc.mSize;
+ MOZ_RELEASE_ASSERT(shmem.Size() >= aDesc.mSize);
+ }
+
+ BufferMapData data = {std::move(shmem), hasMapFlags, offset, size};
+ mSharedMemoryMap.insert({aBufferId, std::move(data)});
+ }
+
+ ErrorBuffer error;
+ ffi::wgpu_server_device_create_buffer(mContext.get(), aDeviceId, aBufferId,
+ label.Get(), aDesc.mSize, aDesc.mUsage,
+ aDesc.mMappedAtCreation, error.ToFFI());
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
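+// Heap-allocated state for an in-flight mapAsync request. Ownership passes to
+// wgpu through the callback's userdata pointer and is reclaimed (deleted) in
+// MapCallback.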
+struct MapRequest {
+ RefPtr<WebGPUParent> mParent;
+ ffi::WGPUGlobal* mContext;
+ ffi::WGPUBufferId mBufferId;
+ ffi::WGPUHostMap mHostMap;
+ uint64_t mOffset;
+ uint64_t mSize;
+ WebGPUParent::BufferMapResolver mResolver;
+};
+
+static nsCString MapStatusString(ffi::WGPUBufferMapAsyncStatus status) {
+ switch (status) {
+ case ffi::WGPUBufferMapAsyncStatus_Success:
+ return nsCString("Success");
+ case ffi::WGPUBufferMapAsyncStatus_AlreadyMapped:
+ return nsCString("Already mapped");
+ case ffi::WGPUBufferMapAsyncStatus_MapAlreadyPending:
+ return nsCString("Map is already pending");
+ case ffi::WGPUBufferMapAsyncStatus_Aborted:
+ return nsCString("Map aborted");
+ case ffi::WGPUBufferMapAsyncStatus_ContextLost:
+ return nsCString("Context lost");
+ case ffi::WGPUBufferMapAsyncStatus_Invalid:
+ return nsCString("Invalid buffer");
+ case ffi::WGPUBufferMapAsyncStatus_InvalidRange:
+ return nsCString("Invalid range");
+ case ffi::WGPUBufferMapAsyncStatus_InvalidAlignment:
+ return nsCString("Invalid alignment");
+ case ffi::WGPUBufferMapAsyncStatus_InvalidUsageFlags:
+ return nsCString("Invalid usage flags");
+ case ffi::WGPUBufferMapAsyncStatus_Error:
+ return nsCString("Map failed");
+ case ffi::WGPUBufferMapAsyncStatus_Sentinel: // For -Wswitch
+ break;
+ }
+
+ MOZ_CRASH("Bad ffi::WGPUBufferMapAsyncStatus");
+}
+
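+// Completion callback for wgpu_server_buffer_map. For read mappings it copies
+// the mapped bytes into the buffer's shared memory segment so the content
+// process can see them, then resolves the child's map promise.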
+static void MapCallback(ffi::WGPUBufferMapAsyncStatus status,
+ uint8_t* userdata) {
+ auto* req = reinterpret_cast<MapRequest*>(userdata);
+
+ if (!req->mParent->CanSend()) {
+ delete req;
+ return;
+ }
+
+ BufferMapResult result;
+
+ auto bufferId = req->mBufferId;
+ auto* mapData = req->mParent->GetBufferMapData(bufferId);
+ MOZ_RELEASE_ASSERT(mapData);
+
+ if (status != ffi::WGPUBufferMapAsyncStatus_Success) {
+ result = BufferMapError(MapStatusString(status));
+ } else {
+ auto size = req->mSize;
+ auto offset = req->mOffset;
+
+ if (req->mHostMap == ffi::WGPUHostMap_Read && size > 0) {
+ const auto src = ffi::wgpu_server_buffer_get_mapped_range(
+ req->mContext, req->mBufferId, offset, size);
+
+ MOZ_RELEASE_ASSERT(mapData->mShmem.Size() >= offset + size);
+ if (src.ptr != nullptr && src.length >= size) {
+ auto dst = mapData->mShmem.Bytes().Subspan(offset, size);
+ memcpy(dst.data(), src.ptr, size);
+ }
+ }
+
+ result =
+ BufferMapSuccess(offset, size, req->mHostMap == ffi::WGPUHostMap_Write);
+
+ mapData->mMappedOffset = offset;
+ mapData->mMappedSize = size;
+ }
+
+ req->mResolver(std::move(result));
+ delete req;
+}
+
+ipc::IPCResult WebGPUParent::RecvBufferMap(RawId aBufferId, uint32_t aMode,
+ uint64_t aOffset, uint64_t aSize,
+ BufferMapResolver&& aResolver) {
+ MOZ_LOG(sLogger, LogLevel::Info,
+ ("RecvBufferMap %" PRIu64 " offset=%" PRIu64 " size=%" PRIu64 "\n",
+ aBufferId, aOffset, aSize));
+
+ ffi::WGPUHostMap mode;
+ switch (aMode) {
+ case dom::GPUMapMode_Binding::READ:
+ mode = ffi::WGPUHostMap_Read;
+ break;
+ case dom::GPUMapMode_Binding::WRITE:
+ mode = ffi::WGPUHostMap_Write;
+ break;
+ default: {
+ nsCString errorString(
+ "GPUBuffer.mapAsync 'mode' argument must be either GPUMapMode.READ "
+ "or GPUMapMode.WRITE");
+ aResolver(BufferMapError(errorString));
+ return IPC_OK();
+ }
+ }
+
+ auto* mapData = GetBufferMapData(aBufferId);
+
+ if (!mapData) {
+ nsCString errorString("Buffer is not mappable");
+ aResolver(BufferMapError(errorString));
+ return IPC_OK();
+ }
+
+ auto* request =
+ new MapRequest{this, mContext.get(), aBufferId, mode,
+ aOffset, aSize, std::move(aResolver)};
+
+ ffi::WGPUBufferMapCallbackC callback = {&MapCallback,
+ reinterpret_cast<uint8_t*>(request)};
+ ffi::wgpu_server_buffer_map(mContext.get(), aBufferId, aOffset, aSize, mode,
+ callback);
+
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBufferUnmap(RawId aDeviceId, RawId aBufferId,
+ bool aFlush) {
+ MOZ_LOG(sLogger, LogLevel::Info,
+ ("RecvBufferUnmap %" PRIu64 " flush=%d\n", aBufferId, aFlush));
+
+ auto* mapData = GetBufferMapData(aBufferId);
+
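+  // For a write mapping (or a buffer mapped at creation), flush by copying
+  // the staged bytes from shared memory back into wgpu's mapped range before
+  // the actual unmap below.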
+ if (mapData && aFlush) {
+ uint64_t offset = mapData->mMappedOffset;
+ uint64_t size = mapData->mMappedSize;
+
+ const auto mapped = ffi::wgpu_server_buffer_get_mapped_range(
+ mContext.get(), aBufferId, offset, size);
+
+ if (mapped.ptr != nullptr && mapped.length >= size) {
+ auto shmSize = mapData->mShmem.Size();
+ MOZ_RELEASE_ASSERT(offset <= shmSize);
+ MOZ_RELEASE_ASSERT(size <= shmSize - offset);
+
+ auto src = mapData->mShmem.Bytes().Subspan(offset, size);
+ memcpy(mapped.ptr, src.data(), size);
+ }
+
+ mapData->mMappedOffset = 0;
+ mapData->mMappedSize = 0;
+ }
+
+ ErrorBuffer error;
+ ffi::wgpu_server_buffer_unmap(mContext.get(), aBufferId, error.ToFFI());
+ ForwardError(aDeviceId, error);
+
+ if (mapData && !mapData->mHasMapFlags) {
+ // We get here if the buffer was mapped at creation without map flags.
+ // We don't need the shared memory anymore.
+ DeallocBufferShmem(aBufferId);
+ }
+
+ return IPC_OK();
+}
+
+void WebGPUParent::DeallocBufferShmem(RawId aBufferId) {
+ const auto iter = mSharedMemoryMap.find(aBufferId);
+ if (iter != mSharedMemoryMap.end()) {
+ mSharedMemoryMap.erase(iter);
+ }
+}
+
+ipc::IPCResult WebGPUParent::RecvBufferDrop(RawId aBufferId) {
+ ffi::wgpu_server_buffer_drop(mContext.get(), aBufferId);
+ MOZ_LOG(sLogger, LogLevel::Info, ("RecvBufferDrop %" PRIu64 "\n", aBufferId));
+
+ DeallocBufferShmem(aBufferId);
+
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aBufferId) {
+ ffi::wgpu_server_buffer_destroy(mContext.get(), aBufferId);
+ MOZ_LOG(sLogger, LogLevel::Info,
+ ("RecvBufferDestroy %" PRIu64 "\n", aBufferId));
+
+ DeallocBufferShmem(aBufferId);
+
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvTextureDestroy(RawId aTextureId) {
+ ffi::wgpu_server_texture_drop(mContext.get(), aTextureId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvTextureViewDestroy(RawId aTextureViewId) {
+ ffi::wgpu_server_texture_view_drop(mContext.get(), aTextureViewId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvSamplerDestroy(RawId aSamplerId) {
+ ffi::wgpu_server_sampler_drop(mContext.get(), aSamplerId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandEncoderFinish(
+ RawId aEncoderId, RawId aDeviceId,
+ const dom::GPUCommandBufferDescriptor& aDesc) {
+ Unused << aDesc;
+ ffi::WGPUCommandBufferDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ ErrorBuffer error;
+ ffi::wgpu_server_encoder_finish(mContext.get(), aEncoderId, &desc,
+ error.ToFFI());
+
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandEncoderDestroy(RawId aEncoderId) {
+ ffi::wgpu_server_encoder_drop(mContext.get(), aEncoderId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandBufferDestroy(RawId aCommandBufferId) {
+ ffi::wgpu_server_command_buffer_drop(mContext.get(), aCommandBufferId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvRenderBundleDestroy(RawId aBundleId) {
+ ffi::wgpu_server_render_bundle_drop(mContext.get(), aBundleId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvQueueSubmit(
+ RawId aQueueId, RawId aDeviceId, const nsTArray<RawId>& aCommandBuffers) {
+ ErrorBuffer error;
+ ffi::wgpu_server_queue_submit(mContext.get(), aQueueId,
+ aCommandBuffers.Elements(),
+ aCommandBuffers.Length(), error.ToFFI());
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvQueueWriteAction(
+ RawId aQueueId, RawId aDeviceId, const ipc::ByteBuf& aByteBuf,
+ ipc::UnsafeSharedMemoryHandle&& aShmem) {
+ auto mapping =
+ ipc::WritableSharedMemoryMapping::Open(std::move(aShmem)).value();
+
+ ErrorBuffer error;
+ ffi::wgpu_server_queue_write_action(mContext.get(), aQueueId,
+ ToFFI(&aByteBuf), mapping.Bytes().data(),
+ mapping.Size(), error.ToFFI());
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBindGroupLayoutDestroy(RawId aBindGroupId) {
+ ffi::wgpu_server_bind_group_layout_drop(mContext.get(), aBindGroupId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvPipelineLayoutDestroy(RawId aLayoutId) {
+ ffi::wgpu_server_pipeline_layout_drop(mContext.get(), aLayoutId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBindGroupDestroy(RawId aBindGroupId) {
+ ffi::wgpu_server_bind_group_drop(mContext.get(), aBindGroupId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvShaderModuleDestroy(RawId aModuleId) {
+ ffi::wgpu_server_shader_module_drop(mContext.get(), aModuleId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvComputePipelineDestroy(RawId aPipelineId) {
+ ffi::wgpu_server_compute_pipeline_drop(mContext.get(), aPipelineId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvRenderPipelineDestroy(RawId aPipelineId) {
+ ffi::wgpu_server_render_pipeline_drop(mContext.get(), aPipelineId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvImplicitLayoutDestroy(
+ RawId aImplicitPlId, const nsTArray<RawId>& aImplicitBglIds) {
+ ffi::wgpu_server_pipeline_layout_drop(mContext.get(), aImplicitPlId);
+ for (const auto& id : aImplicitBglIds) {
+ ffi::wgpu_server_bind_group_layout_drop(mContext.get(), id);
+ }
+ return IPC_OK();
+}
+
+// TODO: proper destruction
+
+ipc::IPCResult WebGPUParent::RecvDeviceCreateSwapChain(
+ RawId aDeviceId, RawId aQueueId, const RGBDescriptor& aDesc,
+ const nsTArray<RawId>& aBufferIds,
+ const layers::RemoteTextureOwnerId& aOwnerId) {
+ switch (aDesc.format()) {
+ case gfx::SurfaceFormat::R8G8B8A8:
+ case gfx::SurfaceFormat::B8G8R8A8:
+ break;
+ default:
+ MOZ_ASSERT_UNREACHABLE("Invalid surface format!");
+ return IPC_OK();
+ }
+
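+  // Round each row's stride up to a 256-byte boundary, wgpu's required
+  // alignment for buffer copies (COPY_BYTES_PER_ROW_ALIGNMENT).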
+ constexpr uint32_t kBufferAlignmentMask = 0xff;
+ const auto bufferStrideWithMask = CheckedInt<uint32_t>(aDesc.size().width) *
+ gfx::BytesPerPixel(aDesc.format()) +
+ kBufferAlignmentMask;
+ if (!bufferStrideWithMask.isValid()) {
+ MOZ_ASSERT_UNREACHABLE("Invalid width / buffer stride!");
+ return IPC_OK();
+ }
+
+ const uint32_t bufferStride =
+ bufferStrideWithMask.value() & ~kBufferAlignmentMask;
+
+ const auto rows = CheckedInt<uint32_t>(aDesc.size().height);
+ if (!rows.isValid()) {
+ MOZ_ASSERT_UNREACHABLE("Invalid height!");
+ return IPC_OK();
+ }
+
+ if (!mRemoteTextureOwner) {
+ mRemoteTextureOwner =
+ MakeRefPtr<layers::RemoteTextureOwnerClient>(OtherPid());
+ }
+ // RemoteTextureMap::GetRemoteTextureForDisplayList() works synchronously.
+ mRemoteTextureOwner->RegisterTextureOwner(aOwnerId, /* aIsSyncMode */ true);
+
+ auto data = MakeRefPtr<PresentationData>(aDeviceId, aQueueId, aDesc,
+ bufferStride, aBufferIds);
+ if (!mCanvasMap.emplace(aOwnerId, data).second) {
+ NS_ERROR("External image is already registered as WebGPU canvas!");
+ }
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceCreateShaderModule(
+ RawId aDeviceId, RawId aModuleId, const nsString& aLabel,
+ const nsCString& aCode, DeviceCreateShaderModuleResolver&& aOutMessage) {
+ // TODO: this should probably be an optional label in the IPC message.
+ const nsACString* label = nullptr;
+ NS_ConvertUTF16toUTF8 utf8Label(aLabel);
+ if (!utf8Label.IsEmpty()) {
+ label = &utf8Label;
+ }
+
+ ffi::WGPUShaderModuleCompilationMessage message;
+
+ bool ok = ffi::wgpu_server_device_create_shader_module(
+ mContext.get(), aDeviceId, aModuleId, label, &aCode, &message);
+
+ nsTArray<WebGPUCompilationMessage> messages;
+
+ if (!ok) {
+ WebGPUCompilationMessage msg;
+ msg.lineNum = message.line_number;
+ msg.linePos = message.line_pos;
+ msg.offset = message.utf16_offset;
+ msg.length = message.utf16_length;
+ msg.message = message.message;
+ // wgpu currently only returns errors.
+ msg.messageType = WebGPUCompilationMessageType::Error;
+
+ messages.AppendElement(msg);
+ }
+
+ aOutMessage(messages);
+
+ return IPC_OK();
+}
+
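+// State for one in-flight present. As with MapRequest, ownership passes
+// through the map callback's userdata pointer; PresentCallback re-wraps it in
+// a UniquePtr to reclaim it.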
+struct PresentRequest {
+ PresentRequest(const ffi::WGPUGlobal* aContext,
+ RefPtr<PresentationData>& aData,
+ RefPtr<layers::RemoteTextureOwnerClient>& aRemoteTextureOwner,
+ const layers::RemoteTextureId aTextureId,
+ const layers::RemoteTextureOwnerId aOwnerId)
+ : mContext(aContext),
+ mData(aData),
+ mRemoteTextureOwner(aRemoteTextureOwner),
+ mTextureId(aTextureId),
+ mOwnerId(aOwnerId) {}
+
+ const ffi::WGPUGlobal* mContext;
+ RefPtr<PresentationData> mData;
+ RefPtr<layers::RemoteTextureOwnerClient> mRemoteTextureOwner;
+ const layers::RemoteTextureId mTextureId;
+ const layers::RemoteTextureOwnerId mOwnerId;
+};
+
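+// Runs when readback of the staging buffer completes: recycles the buffer ID,
+// copies the frame row by row into a remote texture (source pitch and
+// destination stride may differ), and pushes the texture for compositing.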
+static void PresentCallback(ffi::WGPUBufferMapAsyncStatus status,
+ uint8_t* userdata) {
+ UniquePtr<PresentRequest> req(reinterpret_cast<PresentRequest*>(userdata));
+
+ PresentationData* data = req->mData.get();
+ // get the buffer ID
+ RawId bufferId;
+ {
+ MutexAutoLock lock(data->mBuffersLock);
+ bufferId = data->mQueuedBufferIds.back();
+ data->mQueuedBufferIds.pop_back();
+ data->mAvailableBufferIds.push_back(bufferId);
+ }
+ MOZ_LOG(
+ sLogger, LogLevel::Info,
+ ("PresentCallback for buffer %" PRIu64 " status=%d\n", bufferId, status));
+ // copy the data
+ if (status == ffi::WGPUBufferMapAsyncStatus_Success) {
+ const auto bufferSize = data->mDesc.size().height * data->mSourcePitch;
+ const auto mapped = ffi::wgpu_server_buffer_get_mapped_range(
+ req->mContext, bufferId, 0, bufferSize);
+ MOZ_ASSERT(mapped.length >= bufferSize);
+ auto textureData =
+ req->mRemoteTextureOwner->CreateOrRecycleBufferTextureData(
+ req->mOwnerId, data->mDesc.size(), data->mDesc.format());
+ if (!textureData) {
+ gfxCriticalNoteOnce << "Failed to allocate BufferTextureData";
+ return;
+ }
+ layers::MappedTextureData mappedData;
+ if (textureData && textureData->BorrowMappedData(mappedData)) {
+ uint8_t* src = mapped.ptr;
+ uint8_t* dst = mappedData.data;
+ for (auto row = 0; row < data->mDesc.size().height; ++row) {
+ memcpy(dst, src, mappedData.stride);
+ dst += mappedData.stride;
+ src += data->mSourcePitch;
+ }
+ req->mRemoteTextureOwner->PushTexture(req->mTextureId, req->mOwnerId,
+ std::move(textureData),
+ /* aSharedSurface */ nullptr);
+ } else {
+ NS_WARNING("WebGPU present skipped: the swapchain is resized!");
+ }
+ ErrorBuffer error;
+ wgpu_server_buffer_unmap(req->mContext, bufferId, error.ToFFI());
+ if (auto errorString = error.GetError()) {
+ MOZ_LOG(
+ sLogger, LogLevel::Info,
+ ("WebGPU present: buffer unmap failed: %s\n", errorString->get()));
+ }
+ } else {
+ // TODO: better handle errors
+ NS_WARNING("WebGPU frame mapping failed!");
+ }
+}
+
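+// Copies the most recently presented frame for aOwnerId into freshly
+// allocated shared memory, e.g. for canvas snapshots and readback.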
+ipc::IPCResult WebGPUParent::GetFrontBufferSnapshot(
+ IProtocol* aProtocol, const layers::RemoteTextureOwnerId& aOwnerId,
+ Maybe<Shmem>& aShmem, gfx::IntSize& aSize) {
+ const auto& lookup = mCanvasMap.find(aOwnerId);
+ if (lookup == mCanvasMap.end() || !mRemoteTextureOwner) {
+ return IPC_OK();
+ }
+
+ RefPtr<PresentationData> data = lookup->second.get();
+ aSize = data->mDesc.size();
+ uint32_t stride = layers::ImageDataSerializer::ComputeRGBStride(
+ data->mDesc.format(), aSize.width);
+ uint32_t len = data->mDesc.size().height * stride;
+ Shmem shmem;
+ if (!AllocShmem(len, &shmem)) {
+ return IPC_OK();
+ }
+
+ mRemoteTextureOwner->GetLatestBufferSnapshot(aOwnerId, shmem, aSize);
+ aShmem.emplace(std::move(shmem));
+
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
+ RawId aTextureId, RawId aCommandEncoderId,
+ const layers::RemoteTextureId& aRemoteTextureId,
+ const layers::RemoteTextureOwnerId& aOwnerId) {
+ // step 0: get the data associated with the swapchain
+ const auto& lookup = mCanvasMap.find(aOwnerId);
+ if (lookup == mCanvasMap.end() || !mRemoteTextureOwner ||
+ !mRemoteTextureOwner->IsRegistered(aOwnerId)) {
+ NS_WARNING("WebGPU presenting on a destroyed swap chain!");
+ return IPC_OK();
+ }
+
+ RefPtr<PresentationData> data = lookup->second.get();
+ RawId bufferId = 0;
+ const auto& size = data->mDesc.size();
+ const auto bufferSize = data->mDesc.size().height * data->mSourcePitch;
+
+ // step 1: find an available staging buffer, or create one
+ {
+ MutexAutoLock lock(data->mBuffersLock);
+ if (!data->mAvailableBufferIds.empty()) {
+ bufferId = data->mAvailableBufferIds.back();
+ data->mAvailableBufferIds.pop_back();
+ } else if (!data->mUnassignedBufferIds.empty()) {
+ bufferId = data->mUnassignedBufferIds.back();
+ data->mUnassignedBufferIds.pop_back();
+
+ ffi::WGPUBufferUsages usage =
+ WGPUBufferUsages_COPY_DST | WGPUBufferUsages_MAP_READ;
+
+ ErrorBuffer error;
+ ffi::wgpu_server_device_create_buffer(mContext.get(), data->mDeviceId,
+ bufferId, nullptr, bufferSize,
+ usage, false, error.ToFFI());
+ if (ForwardError(data->mDeviceId, error)) {
+ return IPC_OK();
+ }
+    }
+
+ if (bufferId) {
+ data->mQueuedBufferIds.insert(data->mQueuedBufferIds.begin(), bufferId);
+ }
+ }
+
+ MOZ_LOG(sLogger, LogLevel::Info,
+ ("RecvSwapChainPresent with buffer %" PRIu64 "\n", bufferId));
+ if (!bufferId) {
+    // TODO: add a warning - no buffers are available!
+ return IPC_OK();
+ }
+
+  // step 2: submit a copy command for the frame
+ ffi::WGPUCommandEncoderDescriptor encoderDesc = {};
+ {
+ ErrorBuffer error;
+ ffi::wgpu_server_device_create_encoder(mContext.get(), data->mDeviceId,
+ &encoderDesc, aCommandEncoderId,
+ error.ToFFI());
+ if (ForwardError(data->mDeviceId, error)) {
+ return IPC_OK();
+ }
+ }
+
+ const ffi::WGPUImageCopyTexture texView = {
+ aTextureId,
+ };
+ const ffi::WGPUImageDataLayout bufLayout = {
+ 0,
+ data->mSourcePitch,
+ 0,
+ };
+ const ffi::WGPUImageCopyBuffer bufView = {
+ bufferId,
+ bufLayout,
+ };
+ const ffi::WGPUExtent3d extent = {
+ static_cast<uint32_t>(size.width),
+ static_cast<uint32_t>(size.height),
+ 1,
+ };
+ ffi::wgpu_server_encoder_copy_texture_to_buffer(
+ mContext.get(), aCommandEncoderId, &texView, &bufView, &extent);
+ ffi::WGPUCommandBufferDescriptor commandDesc = {};
+ {
+ ErrorBuffer error;
+ ffi::wgpu_server_encoder_finish(mContext.get(), aCommandEncoderId,
+ &commandDesc, error.ToFFI());
+ if (ForwardError(data->mDeviceId, error)) {
+ return IPC_OK();
+ }
+ }
+
+ {
+ ErrorBuffer error;
+ ffi::wgpu_server_queue_submit(mContext.get(), data->mQueueId,
+ &aCommandEncoderId, 1, error.ToFFI());
+ if (ForwardError(data->mDeviceId, error)) {
+ return IPC_OK();
+ }
+ }
+
+  // step 3: request the pixels to be copied into the external texture
+  // TODO: this isn't strictly necessary. When WR wants to Lock() the external
+  // texture, we can just give it the contents of the last mapped buffer
+  // instead of the copy.
+ auto presentRequest = MakeUnique<PresentRequest>(
+ mContext.get(), data, mRemoteTextureOwner, aRemoteTextureId, aOwnerId);
+
+ ffi::WGPUBufferMapCallbackC callback = {
+ &PresentCallback, reinterpret_cast<uint8_t*>(presentRequest.release())};
+ ffi::wgpu_server_buffer_map(mContext.get(), bufferId, 0, bufferSize,
+ ffi::WGPUHostMap_Read, callback);
+
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvSwapChainDestroy(
+ const layers::RemoteTextureOwnerId& aOwnerId) {
+ if (mRemoteTextureOwner) {
+ mRemoteTextureOwner->UnregisterTextureOwner(aOwnerId);
+ }
+ const auto& lookup = mCanvasMap.find(aOwnerId);
+ MOZ_ASSERT(lookup != mCanvasMap.end());
+ if (lookup == mCanvasMap.end()) {
+ NS_WARNING("WebGPU presenting on a destroyed swap chain!");
+ return IPC_OK();
+ }
+
+ RefPtr<PresentationData> data = lookup->second.get();
+ mCanvasMap.erase(lookup);
+
+ MutexAutoLock lock(data->mBuffersLock);
+ ipc::ByteBuf dropByteBuf;
+ for (const auto bid : data->mUnassignedBufferIds) {
+ wgpu_server_buffer_free(bid, ToFFI(&dropByteBuf));
+ }
+ if (dropByteBuf.mData && !SendDropAction(std::move(dropByteBuf))) {
+ NS_WARNING("Unable to free an ID for non-assigned buffer");
+ }
+ for (const auto bid : data->mAvailableBufferIds) {
+ ffi::wgpu_server_buffer_drop(mContext.get(), bid);
+ }
+ for (const auto bid : data->mQueuedBufferIds) {
+ ffi::wgpu_server_buffer_drop(mContext.get(), bid);
+ }
+ return IPC_OK();
+}
+
+void WebGPUParent::ActorDestroy(ActorDestroyReason aWhy) {
+ mTimer.Stop();
+ mCanvasMap.clear();
+ if (mRemoteTextureOwner) {
+ mRemoteTextureOwner->UnregisterAllTextureOwners();
+ mRemoteTextureOwner = nullptr;
+ }
+ ffi::wgpu_server_poll_all_devices(mContext.get(), true);
+ mContext = nullptr;
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceAction(RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf) {
+ ErrorBuffer error;
+ ffi::wgpu_server_device_action(mContext.get(), aDeviceId, ToFFI(&aByteBuf),
+ error.ToFFI());
+
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceActionWithAck(
+ RawId aDeviceId, const ipc::ByteBuf& aByteBuf,
+ DeviceActionWithAckResolver&& aResolver) {
+ ErrorBuffer error;
+ ffi::wgpu_server_device_action(mContext.get(), aDeviceId, ToFFI(&aByteBuf),
+ error.ToFFI());
+
+ ForwardError(aDeviceId, error);
+ aResolver(true);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvTextureAction(RawId aTextureId,
+ RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf) {
+ ErrorBuffer error;
+ ffi::wgpu_server_texture_action(mContext.get(), aTextureId, ToFFI(&aByteBuf),
+ error.ToFFI());
+
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandEncoderAction(
+ RawId aEncoderId, RawId aDeviceId, const ipc::ByteBuf& aByteBuf) {
+ ErrorBuffer error;
+ ffi::wgpu_server_command_encoder_action(mContext.get(), aEncoderId,
+ ToFFI(&aByteBuf), error.ToFFI());
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBumpImplicitBindGroupLayout(RawId aPipelineId,
+ bool aIsCompute,
+ uint32_t aIndex,
+ RawId aAssignId) {
+ ErrorBuffer error;
+ if (aIsCompute) {
+ ffi::wgpu_server_compute_pipeline_get_bind_group_layout(
+ mContext.get(), aPipelineId, aIndex, aAssignId, error.ToFFI());
+ } else {
+ ffi::wgpu_server_render_pipeline_get_bind_group_layout(
+ mContext.get(), aPipelineId, aIndex, aAssignId, error.ToFFI());
+ }
+
+ ForwardError(0, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDevicePushErrorScope(RawId aDeviceId) {
+ const auto& lookup = mErrorScopeMap.find(aDeviceId);
+ if (lookup == mErrorScopeMap.end()) {
+ NS_WARNING("WebGPU error scopes on a destroyed device!");
+ return IPC_OK();
+ }
+
+ lookup->second.mStack.EmplaceBack();
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDevicePopErrorScope(
+ RawId aDeviceId, DevicePopErrorScopeResolver&& aResolver) {
+ const auto& lookup = mErrorScopeMap.find(aDeviceId);
+ if (lookup == mErrorScopeMap.end()) {
+ NS_WARNING("WebGPU error scopes on a destroyed device!");
+ ScopedError error = {true};
+ aResolver(Some(error));
+ return IPC_OK();
+ }
+
+ if (lookup->second.mStack.IsEmpty()) {
+ NS_WARNING("WebGPU no error scope to pop!");
+ ScopedError error = {true};
+ aResolver(Some(error));
+ return IPC_OK();
+ }
+
+ auto scope = lookup->second.mStack.PopLastElement();
+ aResolver(scope);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvGenerateError(RawId aDeviceId,
+ const nsCString& aMessage) {
+ ReportError(aDeviceId, aMessage);
+ return IPC_OK();
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/ipc/WebGPUParent.h b/dom/webgpu/ipc/WebGPUParent.h
new file mode 100644
index 0000000000..384d560003
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUParent.h
@@ -0,0 +1,156 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_PARENT_H_
+#define WEBGPU_PARENT_H_
+
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "mozilla/webgpu/PWebGPUParent.h"
+#include "mozilla/webrender/WebRenderAPI.h"
+#include "mozilla/ipc/RawShmem.h"
+#include "WebGPUTypes.h"
+#include "base/timer.h"
+
+namespace mozilla {
+
+namespace layers {
+class RemoteTextureOwnerClient;
+} // namespace layers
+
+namespace webgpu {
+
+class ErrorBuffer;
+class PresentationData;
+
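+/// Per-device stack of error scopes. A Nothing() entry is a scope that has
+/// not captured an error yet; ReportError fills in the top entry if it is
+/// still empty.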
+struct ErrorScopeStack {
+ nsTArray<MaybeScopedError> mStack;
+};
+
+class WebGPUParent final : public PWebGPUParent {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebGPUParent, override)
+
+ public:
+ explicit WebGPUParent();
+
+ ipc::IPCResult RecvInstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions,
+ const nsTArray<RawId>& aTargetIds,
+ InstanceRequestAdapterResolver&& resolver);
+ ipc::IPCResult RecvAdapterRequestDevice(
+ RawId aAdapterId, const ipc::ByteBuf& aByteBuf, RawId aDeviceId,
+ AdapterRequestDeviceResolver&& resolver);
+ ipc::IPCResult RecvAdapterDestroy(RawId aAdapterId);
+ ipc::IPCResult RecvDeviceDestroy(RawId aDeviceId);
+ ipc::IPCResult RecvCreateBuffer(RawId aDeviceId, RawId aBufferId,
+ dom::GPUBufferDescriptor&& aDesc,
+ ipc::UnsafeSharedMemoryHandle&& aShmem);
+ ipc::IPCResult RecvBufferMap(RawId aBufferId, uint32_t aMode,
+                               uint64_t aOffset, uint64_t aSize,
+ BufferMapResolver&& aResolver);
+ ipc::IPCResult RecvBufferUnmap(RawId aDeviceId, RawId aBufferId, bool aFlush);
+ ipc::IPCResult RecvBufferDestroy(RawId aBufferId);
+ ipc::IPCResult RecvBufferDrop(RawId aBufferId);
+ ipc::IPCResult RecvTextureDestroy(RawId aTextureId);
+ ipc::IPCResult RecvTextureViewDestroy(RawId aTextureViewId);
+ ipc::IPCResult RecvSamplerDestroy(RawId aSamplerId);
+ ipc::IPCResult RecvCommandEncoderFinish(
+ RawId aEncoderId, RawId aDeviceId,
+ const dom::GPUCommandBufferDescriptor& aDesc);
+ ipc::IPCResult RecvCommandEncoderDestroy(RawId aEncoderId);
+ ipc::IPCResult RecvCommandBufferDestroy(RawId aCommandBufferId);
+ ipc::IPCResult RecvRenderBundleDestroy(RawId aBundleId);
+ ipc::IPCResult RecvQueueSubmit(RawId aQueueId, RawId aDeviceId,
+ const nsTArray<RawId>& aCommandBuffers);
+ ipc::IPCResult RecvQueueWriteAction(RawId aQueueId, RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf,
+ ipc::UnsafeSharedMemoryHandle&& aShmem);
+ ipc::IPCResult RecvBindGroupLayoutDestroy(RawId aBindGroupLayoutId);
+ ipc::IPCResult RecvPipelineLayoutDestroy(RawId aPipelineLayoutId);
+ ipc::IPCResult RecvBindGroupDestroy(RawId aBindGroupId);
+ ipc::IPCResult RecvShaderModuleDestroy(RawId aModuleId);
+ ipc::IPCResult RecvComputePipelineDestroy(RawId aPipelineId);
+ ipc::IPCResult RecvRenderPipelineDestroy(RawId aPipelineId);
+ ipc::IPCResult RecvImplicitLayoutDestroy(
+ RawId aImplicitPlId, const nsTArray<RawId>& aImplicitBglIds);
+ ipc::IPCResult RecvDeviceCreateSwapChain(
+ RawId aDeviceId, RawId aQueueId, const layers::RGBDescriptor& aDesc,
+ const nsTArray<RawId>& aBufferIds,
+ const layers::RemoteTextureOwnerId& aOwnerId);
+ ipc::IPCResult RecvDeviceCreateShaderModule(
+ RawId aDeviceId, RawId aModuleId, const nsString& aLabel,
+ const nsCString& aCode, DeviceCreateShaderModuleResolver&& aOutMessage);
+
+ ipc::IPCResult RecvSwapChainPresent(
+ RawId aTextureId, RawId aCommandEncoderId,
+ const layers::RemoteTextureId& aRemoteTextureId,
+ const layers::RemoteTextureOwnerId& aOwnerId);
+ ipc::IPCResult RecvSwapChainDestroy(
+ const layers::RemoteTextureOwnerId& aOwnerId);
+
+ ipc::IPCResult RecvDeviceAction(RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvDeviceActionWithAck(
+ RawId aDeviceId, const ipc::ByteBuf& aByteBuf,
+ DeviceActionWithAckResolver&& aResolver);
+  ipc::IPCResult RecvTextureAction(RawId aTextureId, RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvCommandEncoderAction(RawId aEncoderId, RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvBumpImplicitBindGroupLayout(RawId aPipelineId,
+ bool aIsCompute,
+ uint32_t aIndex,
+ RawId aAssignId);
+
+ ipc::IPCResult RecvDevicePushErrorScope(RawId aDeviceId);
+ ipc::IPCResult RecvDevicePopErrorScope(
+ RawId aDeviceId, DevicePopErrorScopeResolver&& aResolver);
+ ipc::IPCResult RecvGenerateError(RawId aDeviceId, const nsCString& message);
+
+ ipc::IPCResult GetFrontBufferSnapshot(
+ IProtocol* aProtocol, const layers::RemoteTextureOwnerId& aOwnerId,
+ Maybe<Shmem>& aShmem, gfx::IntSize& aSize);
+
+ void ActorDestroy(ActorDestroyReason aWhy) override;
+
+ struct BufferMapData {
+ ipc::WritableSharedMemoryMapping mShmem;
+ // True if buffer's usage has MAP_READ or MAP_WRITE set.
+ bool mHasMapFlags;
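+    // Byte range of the most recent successful mapping; reset to zero on
+    // unmap.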
+ uint64_t mMappedOffset;
+ uint64_t mMappedSize;
+ };
+
+ BufferMapData* GetBufferMapData(RawId aBufferId);
+
+ private:
+ void DeallocBufferShmem(RawId aBufferId);
+
+ virtual ~WebGPUParent();
+ void MaintainDevices();
+ bool ForwardError(RawId aDeviceId, ErrorBuffer& aError);
+ void ReportError(RawId aDeviceId, const nsCString& message);
+
+ UniquePtr<ffi::WGPUGlobal> mContext;
+ base::RepeatingTimer<WebGPUParent> mTimer;
+
+ /// A map from wgpu buffer ids to data about their shared memory segments.
+ /// Includes entries about mappedAtCreation, MAP_READ and MAP_WRITE buffers,
+ /// regardless of their state.
+ std::unordered_map<uint64_t, BufferMapData> mSharedMemoryMap;
+ /// Associated presentation data for each swapchain.
+ std::unordered_map<layers::RemoteTextureOwnerId, RefPtr<PresentationData>,
+ layers::RemoteTextureOwnerId::HashFn>
+ mCanvasMap;
+
+ RefPtr<layers::RemoteTextureOwnerClient> mRemoteTextureOwner;
+
+ /// Associated stack of error scopes for each device.
+ std::unordered_map<uint64_t, ErrorScopeStack> mErrorScopeMap;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // WEBGPU_PARENT_H_
diff --git a/dom/webgpu/ipc/WebGPUSerialize.h b/dom/webgpu/ipc/WebGPUSerialize.h
new file mode 100644
index 0000000000..b130fc992e
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUSerialize.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_SERIALIZE_H_
+#define WEBGPU_SERIALIZE_H_
+
+#include "WebGPUTypes.h"
+#include "ipc/EnumSerializer.h"
+#include "ipc/IPCMessageUtils.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace IPC {
+
+#define DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, guard) \
+ template <> \
+ struct ParamTraits<something> \
+ : public ContiguousEnumSerializer<something, something(0), guard> {}
+
+#define DEFINE_IPC_SERIALIZER_DOM_ENUM(something) \
+ DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something::EndGuard_)
+#define DEFINE_IPC_SERIALIZER_FFI_ENUM(something) \
+ DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something##_Sentinel)
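+// For example, DEFINE_IPC_SERIALIZER_FFI_ENUM(WGPUHostMap) produces a
+// ParamTraits<WGPUHostMap> specialization whose deserializer accepts only
+// values in [0, WGPUHostMap_Sentinel), rejecting out-of-range values from a
+// misbehaving child process.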
+
+DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::dom::GPUPowerPreference);
+
+DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUHostMap);
+
+DEFINE_IPC_SERIALIZER_WITHOUT_FIELDS(mozilla::dom::GPUCommandBufferDescriptor);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPURequestAdapterOptions,
+ mPowerPreference, mForceFallbackAdapter);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUBufferDescriptor, mSize,
+ mUsage, mMappedAtCreation);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ScopedError, operationError,
+ validationMessage);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::WebGPUCompilationMessage,
+ message, lineNum, linePos);
+
+#undef DEFINE_IPC_SERIALIZER_FFI_ENUM
+#undef DEFINE_IPC_SERIALIZER_DOM_ENUM
+#undef DEFINE_IPC_SERIALIZER_ENUM_GUARD
+
+} // namespace IPC
+#endif // WEBGPU_SERIALIZE_H_
diff --git a/dom/webgpu/ipc/WebGPUTypes.h b/dom/webgpu/ipc/WebGPUTypes.h
new file mode 100644
index 0000000000..e607e03b99
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUTypes.h
@@ -0,0 +1,69 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_TYPES_H_
+#define WEBGPU_TYPES_H_
+
+#include <cstdint>
+#include "mozilla/Maybe.h"
+#include "nsString.h"
+#include "mozilla/dom/BindingDeclarations.h"
+
+namespace mozilla::webgpu {
+
+using RawId = uint64_t;
+using BufferAddress = uint64_t;
+
+struct ScopedError {
+  // Did an error occur as a result of the attempt to retrieve an error
+ // (e.g. from a dead device, from an empty scope stack)?
+ bool operationError = false;
+
+ // If non-empty, the first error generated when this scope was on
+ // the top of the stack. This is interpreted as UTF-8.
+ nsCString validationMessage;
+};
+using MaybeScopedError = Maybe<ScopedError>;
+
+enum class WebGPUCompilationMessageType { Error, Warning, Info };
+
+// TODO: Better name? CompilationMessage is already taken by the dom object.
+/// The serializable counterpart of the dom object CompilationMessage.
+struct WebGPUCompilationMessage {
+ nsString message;
+ uint64_t lineNum = 0;
+ uint64_t linePos = 0;
+  // In UTF-16 code units.
+  uint64_t offset = 0;
+  // In UTF-16 code units.
+  uint64_t length = 0;
+ WebGPUCompilationMessageType messageType =
+ WebGPUCompilationMessageType::Error;
+};
+
+/// A helper to reduce the boilerplate of turning the many Optional<nsString>
+/// values we get from the DOM into the nullable nsACString* we pass to the
+/// wgpu FFI.
+class StringHelper {
+ public:
+ explicit StringHelper(const dom::Optional<nsString>& aWide) {
+ if (aWide.WasPassed()) {
+ mNarrow = Some(NS_ConvertUTF16toUTF8(aWide.Value()));
+ }
+ }
+
+ const nsACString* Get() const {
+ if (mNarrow.isSome()) {
+ return mNarrow.ptr();
+ }
+ return nullptr;
+ }
+
+ private:
+ Maybe<NS_ConvertUTF16toUTF8> mNarrow;
+};
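+
+// Example use (mirroring RecvCreateBuffer in WebGPUParent.cpp):
+//
+//   webgpu::StringHelper label(aDesc.mLabel);
+//   ffi::wgpu_server_device_create_buffer(..., label.Get(), ...);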
+
+} // namespace mozilla::webgpu
+
+#endif // WEBGPU_TYPES_H_