summaryrefslogtreecommitdiffstats
path: root/dom/webgpu
diff options
context:
space:
mode:
Diffstat (limited to 'dom/webgpu')
-rw-r--r--dom/webgpu/Adapter.cpp51
-rw-r--r--dom/webgpu/Adapter.h54
-rw-r--r--dom/webgpu/BindGroup.cpp34
-rw-r--r--dom/webgpu/BindGroup.h35
-rw-r--r--dom/webgpu/BindGroupLayout.cpp34
-rw-r--r--dom/webgpu/BindGroupLayout.h35
-rw-r--r--dom/webgpu/Buffer.cpp169
-rw-r--r--dom/webgpu/Buffer.h71
-rw-r--r--dom/webgpu/CanvasContext.cpp125
-rw-r--r--dom/webgpu/CanvasContext.h114
-rw-r--r--dom/webgpu/CommandBuffer.cpp52
-rw-r--r--dom/webgpu/CommandBuffer.h43
-rw-r--r--dom/webgpu/CommandEncoder.cpp220
-rw-r--r--dom/webgpu/CommandEncoder.h102
-rw-r--r--dom/webgpu/ComputePassEncoder.cpp88
-rw-r--r--dom/webgpu/ComputePassEncoder.h61
-rw-r--r--dom/webgpu/ComputePipeline.cpp44
-rw-r--r--dom/webgpu/ComputePipeline.h41
-rw-r--r--dom/webgpu/Device.cpp244
-rw-r--r--dom/webgpu/Device.h141
-rw-r--r--dom/webgpu/DeviceLostInfo.cpp16
-rw-r--r--dom/webgpu/DeviceLostInfo.h33
-rw-r--r--dom/webgpu/Fence.cpp17
-rw-r--r--dom/webgpu/Fence.h36
-rw-r--r--dom/webgpu/Instance.cpp81
-rw-r--r--dom/webgpu/Instance.h53
-rw-r--r--dom/webgpu/ObjectModel.cpp38
-rw-r--r--dom/webgpu/ObjectModel.h97
-rw-r--r--dom/webgpu/OutOfMemoryError.cpp19
-rw-r--r--dom/webgpu/OutOfMemoryError.h35
-rw-r--r--dom/webgpu/PipelineLayout.cpp34
-rw-r--r--dom/webgpu/PipelineLayout.h35
-rw-r--r--dom/webgpu/Queue.cpp126
-rw-r--r--dom/webgpu/Queue.h65
-rw-r--r--dom/webgpu/RenderBundle.cpp20
-rw-r--r--dom/webgpu/RenderBundle.h31
-rw-r--r--dom/webgpu/RenderBundleEncoder.cpp18
-rw-r--r--dom/webgpu/RenderBundleEncoder.h34
-rw-r--r--dom/webgpu/RenderPassEncoder.cpp227
-rw-r--r--dom/webgpu/RenderPassEncoder.h79
-rw-r--r--dom/webgpu/RenderPipeline.cpp44
-rw-r--r--dom/webgpu/RenderPipeline.h41
-rw-r--r--dom/webgpu/Sampler.cpp34
-rw-r--r--dom/webgpu/Sampler.h34
-rw-r--r--dom/webgpu/ShaderModule.cpp34
-rw-r--r--dom/webgpu/ShaderModule.h34
-rw-r--r--dom/webgpu/SwapChain.cpp51
-rw-r--r--dom/webgpu/SwapChain.h50
-rw-r--r--dom/webgpu/Texture.cpp103
-rw-r--r--dom/webgpu/Texture.h56
-rw-r--r--dom/webgpu/TextureView.cpp39
-rw-r--r--dom/webgpu/TextureView.h39
-rw-r--r--dom/webgpu/ValidationError.cpp31
-rw-r--r--dom/webgpu/ValidationError.h40
-rw-r--r--dom/webgpu/ffi/wgpu_ffi_generated.h2967
-rw-r--r--dom/webgpu/ipc/PWebGPU.ipdl94
-rw-r--r--dom/webgpu/ipc/WebGPUChild.cpp697
-rw-r--r--dom/webgpu/ipc/WebGPUChild.h125
-rw-r--r--dom/webgpu/ipc/WebGPUParent.cpp713
-rw-r--r--dom/webgpu/ipc/WebGPUParent.h99
-rw-r--r--dom/webgpu/ipc/WebGPUSerialize.h53
-rw-r--r--dom/webgpu/ipc/WebGPUTypes.h20
-rw-r--r--dom/webgpu/mochitest/mochitest-no-pref.ini4
-rw-r--r--dom/webgpu/mochitest/mochitest.ini13
-rw-r--r--dom/webgpu/mochitest/test_buffer_mapping.html39
-rw-r--r--dom/webgpu/mochitest/test_command_buffer_creation.html26
-rw-r--r--dom/webgpu/mochitest/test_device_creation.html24
-rw-r--r--dom/webgpu/mochitest/test_disabled.html16
-rw-r--r--dom/webgpu/mochitest/test_enabled.html16
-rw-r--r--dom/webgpu/mochitest/test_queue_write.html31
-rw-r--r--dom/webgpu/mochitest/test_submit_compute_empty.html29
-rw-r--r--dom/webgpu/mochitest/test_submit_render_empty.html43
-rw-r--r--dom/webgpu/moz.build70
73 files changed, 8561 insertions, 0 deletions
diff --git a/dom/webgpu/Adapter.cpp b/dom/webgpu/Adapter.cpp
new file mode 100644
index 0000000000..419890fb26
--- /dev/null
+++ b/dom/webgpu/Adapter.cpp
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "Adapter.h"
+
+#include "Device.h"
+#include "Instance.h"
+#include "ipc/WebGPUChild.h"
+#include "mozilla/dom/Promise.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(Adapter, mParent, mBridge)
+GPU_IMPL_JS_WRAP(Adapter)
+
+Adapter::Adapter(Instance* const aParent, RawId aId)
+ : ChildOf(aParent), mBridge(aParent->mBridge), mId(aId) {}
+
+Adapter::~Adapter() { Cleanup(); }
+
+void Adapter::Cleanup() {
+ if (mValid && mBridge && mBridge->IsOpen()) {
+ mValid = false;
+ mBridge->SendAdapterDestroy(mId);
+ }
+}
+
+already_AddRefed<dom::Promise> Adapter::RequestDevice(
+ const dom::GPUDeviceDescriptor& aDesc, ErrorResult& aRv) {
+ RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);
+ if (NS_WARN_IF(aRv.Failed())) {
+ return nullptr;
+ }
+
+ Maybe<RawId> id = mBridge->AdapterRequestDevice(mId, aDesc);
+ if (id.isSome()) {
+ RefPtr<Device> device = new Device(this, id.value());
+ promise->MaybeResolve(device);
+ } else {
+    promise->MaybeRejectWithNotSupportedError("Unable to instantiate a Device");
+ }
+
+ return promise.forget();
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/Adapter.h b/dom/webgpu/Adapter.h
new file mode 100644
index 0000000000..9deb48ae84
--- /dev/null
+++ b/dom/webgpu/Adapter.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_Adapter_H_
+#define GPU_Adapter_H_
+
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsString.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+class ErrorResult;
+namespace dom {
+class Promise;
+struct GPUDeviceDescriptor;
+struct GPUExtensions;
+struct GPUFeatures;
+} // namespace dom
+
+namespace webgpu {
+class Device;
+class Instance;
+class WebGPUChild;
+
+class Adapter final : public ObjectBase, public ChildOf<Instance> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(Adapter)
+ GPU_DECL_JS_WRAP(Adapter)
+
+ RefPtr<WebGPUChild> mBridge;
+
+ private:
+ Adapter() = delete;
+ ~Adapter();
+ void Cleanup();
+
+ const RawId mId;
+ const nsString mName;
+
+ public:
+ explicit Adapter(Instance* const aParent, RawId aId);
+ void GetName(nsString& out) const { out = mName; }
+
+ already_AddRefed<dom::Promise> RequestDevice(
+ const dom::GPUDeviceDescriptor& aDesc, ErrorResult& aRv);
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_Adapter_H_
diff --git a/dom/webgpu/BindGroup.cpp b/dom/webgpu/BindGroup.cpp
new file mode 100644
index 0000000000..e85ed58d26
--- /dev/null
+++ b/dom/webgpu/BindGroup.cpp
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "BindGroup.h"
+#include "ipc/WebGPUChild.h"
+
+#include "Device.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(BindGroup, mParent)
+GPU_IMPL_JS_WRAP(BindGroup)
+
+BindGroup::BindGroup(Device* const aParent, RawId aId)
+ : ChildOf(aParent), mId(aId) {}
+
+BindGroup::~BindGroup() { Cleanup(); }
+
+void BindGroup::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendBindGroupDestroy(mId);
+ }
+ }
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/BindGroup.h b/dom/webgpu/BindGroup.h
new file mode 100644
index 0000000000..8ebc2f3754
--- /dev/null
+++ b/dom/webgpu/BindGroup.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_BindGroup_H_
+#define GPU_BindGroup_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+
+namespace mozilla {
+namespace webgpu {
+
+class Device;
+
+class BindGroup final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(BindGroup)
+ GPU_DECL_JS_WRAP(BindGroup)
+
+ BindGroup(Device* const aParent, RawId aId);
+
+ const RawId mId;
+
+ private:
+ ~BindGroup();
+ void Cleanup();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_BindGroup_H_
diff --git a/dom/webgpu/BindGroupLayout.cpp b/dom/webgpu/BindGroupLayout.cpp
new file mode 100644
index 0000000000..1501b358ed
--- /dev/null
+++ b/dom/webgpu/BindGroupLayout.cpp
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "BindGroupLayout.h"
+#include "ipc/WebGPUChild.h"
+
+#include "Device.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(BindGroupLayout, mParent)
+GPU_IMPL_JS_WRAP(BindGroupLayout)
+
+BindGroupLayout::BindGroupLayout(Device* const aParent, RawId aId)
+ : ChildOf(aParent), mId(aId) {}
+
+BindGroupLayout::~BindGroupLayout() { Cleanup(); }
+
+void BindGroupLayout::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendBindGroupLayoutDestroy(mId);
+ }
+ }
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/BindGroupLayout.h b/dom/webgpu/BindGroupLayout.h
new file mode 100644
index 0000000000..c9d2e2e52b
--- /dev/null
+++ b/dom/webgpu/BindGroupLayout.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_BindGroupLayout_H_
+#define GPU_BindGroupLayout_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+
+namespace mozilla {
+namespace webgpu {
+
+class Device;
+
+class BindGroupLayout final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(BindGroupLayout)
+ GPU_DECL_JS_WRAP(BindGroupLayout)
+
+ BindGroupLayout(Device* const aParent, RawId aId);
+
+ const RawId mId;
+
+ private:
+ ~BindGroupLayout();
+ void Cleanup();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_BindGroupLayout_H_
diff --git a/dom/webgpu/Buffer.cpp b/dom/webgpu/Buffer.cpp
new file mode 100644
index 0000000000..7ccb5e88c2
--- /dev/null
+++ b/dom/webgpu/Buffer.cpp
@@ -0,0 +1,169 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "Buffer.h"
+
+#include "mozilla/dom/Promise.h"
+#include "mozilla/dom/ScriptSettings.h"
+#include "mozilla/HoldDropJSObjects.h"
+#include "mozilla/ipc/Shmem.h"
+#include "ipc/WebGPUChild.h"
+#include "js/RootingAPI.h"
+#include "nsContentUtils.h"
+#include "nsWrapperCache.h"
+#include "Device.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_JS_WRAP(Buffer)
+
+NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(Buffer, AddRef)
+NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(Buffer, Release)
+NS_IMPL_CYCLE_COLLECTION_CLASS(Buffer)
+NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(Buffer)
+ tmp->Cleanup();
+ NS_IMPL_CYCLE_COLLECTION_UNLINK(mParent)
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER
+NS_IMPL_CYCLE_COLLECTION_UNLINK_END
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(Buffer)
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mParent)
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
+NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(Buffer)
+ NS_IMPL_CYCLE_COLLECTION_TRACE_PRESERVED_WRAPPER
+ if (tmp->mMapped) {
+ for (uint32_t i = 0; i < tmp->mMapped->mArrayBuffers.Length(); ++i) {
+ NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(
+ mMapped->mArrayBuffers[i])
+ }
+ }
+NS_IMPL_CYCLE_COLLECTION_TRACE_END
+
+Buffer::Buffer(Device* const aParent, RawId aId, BufferAddress aSize)
+ : ChildOf(aParent), mId(aId), mSize(aSize) {
+ mozilla::HoldJSObjects(this);
+}
+
+Buffer::~Buffer() {
+ Cleanup();
+ mozilla::DropJSObjects(this);
+}
+
+void Buffer::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendBufferDestroy(mId);
+ }
+ if (bridge && mMapped) {
+ bridge->DeallocShmem(mMapped->mShmem);
+ }
+ }
+}
+
+void Buffer::SetMapped(ipc::Shmem&& aShmem, bool aWritable) {
+ MOZ_ASSERT(!mMapped);
+ mMapped.emplace();
+ mMapped->mShmem = std::move(aShmem);
+ mMapped->mWritable = aWritable;
+}
+
+already_AddRefed<dom::Promise> Buffer::MapAsync(
+ uint32_t aMode, uint64_t aOffset, const dom::Optional<uint64_t>& aSize,
+ ErrorResult& aRv) {
+ RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);
+ if (NS_WARN_IF(aRv.Failed())) {
+ return nullptr;
+ }
+ if (mMapped) {
+ aRv.ThrowInvalidStateError("Unable to map a buffer that is already mapped");
+ return nullptr;
+ }
+  // Initialize with a dummy shmem; it will be replaced by the real one once
+  // the promise is resolved.
+ SetMapped(ipc::Shmem(), aMode == dom::GPUMapMode_Binding::WRITE);
+
+ const auto checked = aSize.WasPassed() ? CheckedInt<size_t>(aSize.Value())
+ : CheckedInt<size_t>(mSize) - aOffset;
+ if (!checked.isValid()) {
+ aRv.ThrowRangeError("Mapped size is too large");
+ return nullptr;
+ }
+
+ const auto& size = checked.value();
+ RefPtr<Buffer> self(this);
+
+ auto mappingPromise = mParent->MapBufferAsync(mId, aMode, aOffset, size, aRv);
+ if (!mappingPromise) {
+ return nullptr;
+ }
+
+ mappingPromise->Then(
+ GetMainThreadSerialEventTarget(), __func__,
+ [promise, self](ipc::Shmem&& aShmem) {
+ self->mMapped->mShmem = std::move(aShmem);
+ promise->MaybeResolve(0);
+ },
+ [promise](const ipc::ResponseRejectReason&) {
+ promise->MaybeRejectWithAbortError("Internal communication error!");
+ });
+
+ return promise.forget();
+}
+
+void Buffer::GetMappedRange(JSContext* aCx, uint64_t aOffset,
+ const dom::Optional<uint64_t>& aSize,
+ JS::Rooted<JSObject*>* aObject, ErrorResult& aRv) {
+ const auto checkedOffset = CheckedInt<size_t>(aOffset);
+ const auto checkedSize = aSize.WasPassed()
+ ? CheckedInt<size_t>(aSize.Value())
+ : CheckedInt<size_t>(mSize) - aOffset;
+ if (!checkedOffset.isValid() || !checkedSize.isValid()) {
+ aRv.ThrowRangeError("Invalid mapped range");
+ return;
+ }
+ if (!mMapped || !mMapped->IsReady()) {
+ aRv.ThrowInvalidStateError("Buffer is not mapped");
+ return;
+ }
+
+ auto* const arrayBuffer = mParent->CreateExternalArrayBuffer(
+ aCx, checkedOffset.value(), checkedSize.value(), mMapped->mShmem);
+ if (!arrayBuffer) {
+ aRv.NoteJSContextException(aCx);
+ return;
+ }
+
+ aObject->set(arrayBuffer);
+ mMapped->mArrayBuffers.AppendElement(*aObject);
+}
+
+void Buffer::Unmap(JSContext* aCx, ErrorResult& aRv) {
+ if (!mMapped) {
+ return;
+ }
+
+ for (const auto& arrayBuffer : mMapped->mArrayBuffers) {
+ JS::Rooted<JSObject*> rooted(aCx, arrayBuffer);
+ bool ok = JS::DetachArrayBuffer(aCx, rooted);
+ if (!ok) {
+ aRv.NoteJSContextException(aCx);
+ return;
+ }
+ };
+
+ mParent->UnmapBuffer(mId, std::move(mMapped->mShmem), mMapped->mWritable);
+ mMapped.reset();
+}
+
+void Buffer::Destroy() {
+ // TODO: we don't have to implement it right now, but it's used by the
+ // examples
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/Buffer.h b/dom/webgpu/Buffer.h
new file mode 100644
index 0000000000..4b7537cc6d
--- /dev/null
+++ b/dom/webgpu/Buffer.h
@@ -0,0 +1,71 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_BUFFER_H_
+#define GPU_BUFFER_H_
+
+#include "js/RootingAPI.h"
+#include "mozilla/dom/Nullable.h"
+#include "mozilla/ipc/Shmem.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace ipc {
+class Shmem;
+} // namespace ipc
+namespace webgpu {
+
+class Device;
+
+struct MappedInfo {
+ ipc::Shmem mShmem;
+ // True if mapping is requested for writing.
+ bool mWritable = false;
+ // Populated by `GetMappedRange`.
+ nsTArray<JS::Heap<JSObject*>> mArrayBuffers;
+
+ MappedInfo() = default;
+ MappedInfo(const MappedInfo&) = delete;
+ bool IsReady() const { return mShmem.IsReadable(); }
+};
+
+class Buffer final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(Buffer)
+ GPU_DECL_JS_WRAP(Buffer)
+
+ Buffer(Device* const aParent, RawId aId, BufferAddress aSize);
+ void SetMapped(ipc::Shmem&& aShmem, bool aWritable);
+
+ const RawId mId;
+
+ private:
+ virtual ~Buffer();
+ void Cleanup();
+
+  // Note: we can't map a buffer whose size doesn't fit into `size_t`
+  // (which may be smaller than `BufferAddress`), but in general not all
+  // buffers are mapped.
+ const BufferAddress mSize;
+ nsString mLabel;
+ // Information about the currently active mapping.
+ Maybe<MappedInfo> mMapped;
+
+ public:
+ already_AddRefed<dom::Promise> MapAsync(uint32_t aMode, uint64_t aOffset,
+ const dom::Optional<uint64_t>& aSize,
+ ErrorResult& aRv);
+ void GetMappedRange(JSContext* aCx, uint64_t aOffset,
+ const dom::Optional<uint64_t>& aSize,
+ JS::Rooted<JSObject*>* aObject, ErrorResult& aRv);
+ void Unmap(JSContext* aCx, ErrorResult& aRv);
+ void Destroy();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_BUFFER_H_
diff --git a/dom/webgpu/CanvasContext.cpp b/dom/webgpu/CanvasContext.cpp
new file mode 100644
index 0000000000..a7a8b8ed9b
--- /dev/null
+++ b/dom/webgpu/CanvasContext.cpp
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "CanvasContext.h"
+#include "SwapChain.h"
+#include "nsDisplayList.h"
+#include "LayerUserData.h"
+#include "mozilla/dom/HTMLCanvasElement.h"
+#include "mozilla/layers/CompositorManagerChild.h"
+#include "mozilla/layers/RenderRootStateManager.h"
+#include "mozilla/layers/WebRenderBridgeChild.h"
+#include "ipc/WebGPUChild.h"
+
+namespace mozilla {
+namespace webgpu {
+
+NS_IMPL_CYCLE_COLLECTING_ADDREF(CanvasContext)
+NS_IMPL_CYCLE_COLLECTING_RELEASE(CanvasContext)
+
+GPU_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(CanvasContext, mSwapChain,
+ mCanvasElement, mOffscreenCanvas)
+
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(CanvasContext)
+ NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
+ NS_INTERFACE_MAP_ENTRY(nsICanvasRenderingContextInternal)
+ NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
+CanvasContext::CanvasContext()
+ : mExternalImageId(layers::CompositorManagerChild::GetInstance()
+ ->GetNextExternalImageId()) {}
+
+CanvasContext::~CanvasContext() {
+ Cleanup();
+ RemovePostRefreshObserver();
+}
+
+void CanvasContext::Cleanup() {
+ if (mSwapChain) {
+ mSwapChain->Destroy(mExternalImageId);
+ mSwapChain = nullptr;
+ }
+ if (mRenderRootStateManager && mImageKey) {
+ mRenderRootStateManager->AddImageKeyForDiscard(mImageKey.value());
+ mRenderRootStateManager = nullptr;
+ mImageKey.reset();
+ }
+}
+
+JSObject* CanvasContext::WrapObject(JSContext* aCx,
+ JS::Handle<JSObject*> aGivenProto) {
+ return dom::GPUCanvasContext_Binding::Wrap(aCx, this, aGivenProto);
+}
+
+already_AddRefed<layers::Layer> CanvasContext::GetCanvasLayer(
+ nsDisplayListBuilder* aBuilder, layers::Layer* aOldLayer,
+ layers::LayerManager* aManager) {
+ return nullptr;
+}
+
+bool CanvasContext::UpdateWebRenderCanvasData(
+ nsDisplayListBuilder* aBuilder, WebRenderCanvasData* aCanvasData) {
+ return true;
+}
+
+RefPtr<SwapChain> CanvasContext::ConfigureSwapChain(
+ const dom::GPUSwapChainDescriptor& aDesc, ErrorResult& aRv) {
+ Cleanup();
+
+ gfx::SurfaceFormat format;
+ switch (aDesc.mFormat) {
+ case dom::GPUTextureFormat::Rgba8unorm:
+ format = gfx::SurfaceFormat::R8G8B8A8;
+ break;
+ case dom::GPUTextureFormat::Bgra8unorm:
+ format = gfx::SurfaceFormat::B8G8R8A8;
+ break;
+ default:
+ aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
+ return nullptr;
+ }
+
+ dom::GPUExtent3DDict extent;
+ extent.mWidth = mWidth;
+ extent.mHeight = mHeight;
+ extent.mDepth = 1;
+ mSwapChain = new SwapChain(aDesc, extent, mExternalImageId, format);
+
+ // Force a new frame to be built, which will execute the
+ // `CanvasContextType::WebGPU` switch case in `CreateWebRenderCommands` and
+ // populate the WR user data.
+ mCanvasElement->InvalidateCanvas();
+
+ mSwapChain->GetCurrentTexture()->mTargetCanvasElement = mCanvasElement;
+ return mSwapChain;
+}
+
+Maybe<wr::ImageKey> CanvasContext::GetImageKey() const { return mImageKey; }
+
+wr::ImageKey CanvasContext::CreateImageKey(
+ layers::RenderRootStateManager* aManager) {
+ const auto key = aManager->WrBridge()->GetNextImageKey();
+ mRenderRootStateManager = aManager;
+ mImageKey = Some(key);
+ return key;
+}
+
+bool CanvasContext::UpdateWebRenderLocalCanvasData(
+ layers::WebRenderLocalCanvasData* aCanvasData) {
+ if (!mSwapChain || !mSwapChain->GetGpuBridge()) {
+ return false;
+ }
+
+ aCanvasData->mGpuBridge = mSwapChain->GetGpuBridge();
+ aCanvasData->mGpuTextureId = mSwapChain->GetCurrentTexture()->mId;
+ aCanvasData->mExternalImageId = mExternalImageId;
+ aCanvasData->mFormat = mSwapChain->mFormat;
+ return true;
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/CanvasContext.h b/dom/webgpu/CanvasContext.h
new file mode 100644
index 0000000000..86f3b46c82
--- /dev/null
+++ b/dom/webgpu/CanvasContext.h
@@ -0,0 +1,114 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_CanvasContext_H_
+#define GPU_CanvasContext_H_
+
+#include "nsICanvasRenderingContextInternal.h"
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "SwapChain.h"
+#include "mozilla/webrender/WebRenderAPI.h"
+
+namespace mozilla {
+namespace dom {
+class Promise;
+} // namespace dom
+namespace layers {
+class WebRenderLocalCanvasData;
+};
+namespace webgpu {
+class Device;
+class SwapChain;
+class Texture;
+
+class CanvasContext final : public nsICanvasRenderingContextInternal,
+ public nsWrapperCache {
+ private:
+ virtual ~CanvasContext();
+ void Cleanup();
+
+ public:
+ // nsISupports interface + CC
+ NS_DECL_CYCLE_COLLECTING_ISUPPORTS
+ NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS(CanvasContext)
+
+ CanvasContext();
+
+ JSObject* WrapObject(JSContext* aCx,
+ JS::Handle<JSObject*> aGivenProto) override;
+
+ void RemoveSwapChain();
+
+ Maybe<wr::ImageKey> GetImageKey() const;
+ wr::ImageKey CreateImageKey(layers::RenderRootStateManager* aManager);
+ bool UpdateWebRenderLocalCanvasData(
+ layers::WebRenderLocalCanvasData* aCanvasData);
+
+ const wr::ExternalImageId mExternalImageId;
+
+ public: // nsICanvasRenderingContextInternal
+ int32_t GetWidth() override { return mWidth; }
+ int32_t GetHeight() override { return mHeight; }
+
+ NS_IMETHOD SetDimensions(int32_t aWidth, int32_t aHeight) override {
+ mWidth = aWidth;
+ mHeight = aHeight;
+ return NS_OK;
+ }
+ NS_IMETHOD InitializeWithDrawTarget(
+ nsIDocShell* aShell, NotNull<gfx::DrawTarget*> aTarget) override {
+ return NS_OK;
+ }
+
+ mozilla::UniquePtr<uint8_t[]> GetImageBuffer(int32_t* aFormat) override {
+ MOZ_CRASH("todo");
+ }
+ NS_IMETHOD GetInputStream(const char* aMimeType,
+ const nsAString& aEncoderOptions,
+ nsIInputStream** aStream) override {
+ *aStream = nullptr;
+ return NS_OK;
+ }
+
+ already_AddRefed<mozilla::gfx::SourceSurface> GetSurfaceSnapshot(
+ gfxAlphaType* aOutAlphaType) override {
+ return nullptr;
+ }
+
+ void SetOpaqueValueFromOpaqueAttr(bool aOpaqueAttrValue) override {}
+ bool GetIsOpaque() override { return true; }
+ NS_IMETHOD Reset() override { return NS_OK; }
+ already_AddRefed<Layer> GetCanvasLayer(nsDisplayListBuilder* aBuilder,
+ Layer* aOldLayer,
+ LayerManager* aManager) override;
+ bool UpdateWebRenderCanvasData(nsDisplayListBuilder* aBuilder,
+ WebRenderCanvasData* aCanvasData) override;
+ void MarkContextClean() override {}
+
+ NS_IMETHOD Redraw(const gfxRect& aDirty) override { return NS_OK; }
+ NS_IMETHOD SetIsIPC(bool aIsIPC) override { return NS_OK; }
+
+ void DidRefresh() override {}
+
+ void MarkContextCleanForFrameCapture() override {}
+ bool IsContextCleanForFrameCapture() override { return false; }
+
+ public:
+ RefPtr<SwapChain> ConfigureSwapChain(const dom::GPUSwapChainDescriptor& aDesc,
+ ErrorResult& aRv);
+
+ private:
+ uint32_t mWidth = 0, mHeight = 0;
+
+ RefPtr<SwapChain> mSwapChain;
+ RefPtr<layers::RenderRootStateManager> mRenderRootStateManager;
+ Maybe<wr::ImageKey> mImageKey;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_CanvasContext_H_
diff --git a/dom/webgpu/CommandBuffer.cpp b/dom/webgpu/CommandBuffer.cpp
new file mode 100644
index 0000000000..d6ea033f73
--- /dev/null
+++ b/dom/webgpu/CommandBuffer.cpp
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "CommandBuffer.h"
+#include "ipc/WebGPUChild.h"
+
+#include "mozilla/dom/HTMLCanvasElement.h"
+#include "Device.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(CommandBuffer, mParent)
+GPU_IMPL_JS_WRAP(CommandBuffer)
+
+CommandBuffer::CommandBuffer(
+ Device* const aParent, RawId aId,
+ const WeakPtr<dom::HTMLCanvasElement>& aTargetCanvasElement)
+ : ChildOf(aParent), mId(aId), mTargetCanvasElement(aTargetCanvasElement) {
+ if (!aId) {
+ mValid = false;
+ }
+}
+
+CommandBuffer::~CommandBuffer() { Cleanup(); }
+
+void CommandBuffer::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendCommandBufferDestroy(mId);
+ }
+ }
+}
+
+Maybe<RawId> CommandBuffer::Commit() {
+ if (!mValid) {
+ return Nothing();
+ }
+ mValid = false;
+ if (mTargetCanvasElement) {
+ mTargetCanvasElement->InvalidateCanvasContent(nullptr);
+ }
+ return Some(mId);
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/CommandBuffer.h b/dom/webgpu/CommandBuffer.h
new file mode 100644
index 0000000000..e0748c29fd
--- /dev/null
+++ b/dom/webgpu/CommandBuffer.h
@@ -0,0 +1,43 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_CommandBuffer_H_
+#define GPU_CommandBuffer_H_
+
+#include "mozilla/WeakPtr.h"
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace dom {
+class HTMLCanvasElement;
+} // namespace dom
+namespace webgpu {
+
+class Device;
+
+class CommandBuffer final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(CommandBuffer)
+ GPU_DECL_JS_WRAP(CommandBuffer)
+
+ CommandBuffer(Device* const aParent, RawId aId,
+ const WeakPtr<dom::HTMLCanvasElement>& aTargetCanvasElement);
+
+ Maybe<RawId> Commit();
+
+ private:
+ CommandBuffer() = delete;
+ ~CommandBuffer();
+ void Cleanup();
+
+ const RawId mId;
+ const WeakPtr<dom::HTMLCanvasElement> mTargetCanvasElement;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_CommandBuffer_H_
diff --git a/dom/webgpu/CommandEncoder.cpp b/dom/webgpu/CommandEncoder.cpp
new file mode 100644
index 0000000000..c10e80580d
--- /dev/null
+++ b/dom/webgpu/CommandEncoder.cpp
@@ -0,0 +1,220 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "CommandEncoder.h"
+
+#include "CommandBuffer.h"
+#include "Buffer.h"
+#include "ComputePassEncoder.h"
+#include "Device.h"
+#include "RenderPassEncoder.h"
+#include "mozilla/dom/HTMLCanvasElement.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "ipc/WebGPUChild.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(CommandEncoder, mParent, mBridge)
+GPU_IMPL_JS_WRAP(CommandEncoder)
+
+// Copies the WebIDL GPUTextureDataLayout fields (offset / bytesPerRow /
+// rowsPerImage) into the zero-initialized wgpu FFI struct.
+void CommandEncoder::ConvertTextureDataLayoutToFFI(
+ const dom::GPUTextureDataLayout& aLayout,
+ ffi::WGPUTextureDataLayout* aLayoutFFI) {
+ *aLayoutFFI = {};
+ aLayoutFFI->offset = aLayout.mOffset;
+ aLayoutFFI->bytes_per_row = aLayout.mBytesPerRow;
+ aLayoutFFI->rows_per_image = aLayout.mRowsPerImage;
+}
+
+// Converts a WebIDL GPUTextureCopyView into the wgpu FFI equivalent.
+// The origin is a WebIDL union: either an [x, y, z] sequence (missing
+// trailing components default to 0) or a GPUOrigin3DDict. When no origin
+// was passed at all, the zero-initialization above leaves it at (0,0,0).
+void CommandEncoder::ConvertTextureCopyViewToFFI(
+ const dom::GPUTextureCopyView& aView, ffi::WGPUTextureCopyView* aViewFFI) {
+ *aViewFFI = {};
+ aViewFFI->texture = aView.mTexture->mId;
+ aViewFFI->mip_level = aView.mMipLevel;
+ if (aView.mOrigin.WasPassed()) {
+ const auto& origin = aView.mOrigin.Value();
+ if (origin.IsRangeEnforcedUnsignedLongSequence()) {
+ const auto& seq = origin.GetAsRangeEnforcedUnsignedLongSequence();
+ aViewFFI->origin.x = seq.Length() > 0 ? seq[0] : 0;
+ aViewFFI->origin.y = seq.Length() > 1 ? seq[1] : 0;
+ aViewFFI->origin.z = seq.Length() > 2 ? seq[2] : 0;
+ } else if (origin.IsGPUOrigin3DDict()) {
+ const auto& dict = origin.GetAsGPUOrigin3DDict();
+ aViewFFI->origin.x = dict.mX;
+ aViewFFI->origin.y = dict.mY;
+ aViewFFI->origin.z = dict.mZ;
+ } else {
+ // The union has exactly two arms; anything else is a binding bug.
+ MOZ_CRASH("Unexpected origin type");
+ }
+ }
+}
+
+// Converts a WebIDL GPUExtent3D (either a [width, height, depth] sequence
+// with missing components defaulting to 0, or a GPUExtent3DDict) into the
+// wgpu FFI extent struct.
+void CommandEncoder::ConvertExtent3DToFFI(const dom::GPUExtent3D& aExtent,
+ ffi::WGPUExtent3d* aExtentFFI) {
+ *aExtentFFI = {};
+ if (aExtent.IsRangeEnforcedUnsignedLongSequence()) {
+ const auto& seq = aExtent.GetAsRangeEnforcedUnsignedLongSequence();
+ aExtentFFI->width = seq.Length() > 0 ? seq[0] : 0;
+ aExtentFFI->height = seq.Length() > 1 ? seq[1] : 0;
+ aExtentFFI->depth = seq.Length() > 2 ? seq[2] : 0;
+ } else if (aExtent.IsGPUExtent3DDict()) {
+ const auto& dict = aExtent.GetAsGPUExtent3DDict();
+ aExtentFFI->width = dict.mWidth;
+ aExtentFFI->height = dict.mHeight;
+ aExtentFFI->depth = dict.mDepth;
+ } else {
+ // Fixed typo in the crash diagnostic ("Unexptected" -> "Unexpected").
+ MOZ_CRASH("Unexpected extent type");
+ }
+}
+
+// File-local adaptor: builds an FFI buffer-copy view from the WebIDL dict.
+// NOTE(review): aView is passed where a GPUTextureDataLayout is expected —
+// presumably GPUBufferCopyView inherits GPUTextureDataLayout in the WebIDL;
+// confirm against WebGPU.webidl.
+static ffi::WGPUBufferCopyView ConvertBufferCopyView(
+ const dom::GPUBufferCopyView& aView) {
+ ffi::WGPUBufferCopyView view = {};
+ view.buffer = aView.mBuffer->mId;
+ CommandEncoder::ConvertTextureDataLayoutToFFI(aView, &view.layout);
+ return view;
+}
+
+// File-local adaptor over CommandEncoder::ConvertTextureCopyViewToFFI that
+// returns the FFI struct by value.
+static ffi::WGPUTextureCopyView ConvertTextureCopyView(
+ const dom::GPUTextureCopyView& aView) {
+ ffi::WGPUTextureCopyView view = {};
+ CommandEncoder::ConvertTextureCopyViewToFFI(aView, &view);
+ return view;
+}
+
+// File-local adaptor over CommandEncoder::ConvertExtent3DToFFI that returns
+// the FFI struct by value.
+static ffi::WGPUExtent3d ConvertExtent(const dom::GPUExtent3D& aExtent) {
+ ffi::WGPUExtent3d extent = {};
+ CommandEncoder::ConvertExtent3DToFFI(aExtent, &extent);
+ return extent;
+}
+
+// An encoder is bound at construction to its Device (parent), the IPC
+// bridge it sends recorded actions over, and its GPU-process id.
+CommandEncoder::CommandEncoder(Device* const aParent,
+ WebGPUChild* const aBridge, RawId aId)
+ : ChildOf(aParent), mId(aId), mBridge(aBridge) {}
+
+CommandEncoder::~CommandEncoder() { Cleanup(); }
+
+// Invalidates the encoder and destroys its GPU-process counterpart if the
+// bridge is still open. Idempotent via the mValid guard.
+void CommandEncoder::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendCommandEncoderDestroy(mId);
+ }
+ }
+}
+
+// Records a buffer-to-buffer copy. Each Copy* method below follows the same
+// pattern: serialize the command into a ByteBuf via the wgpu FFI, then ship
+// it to the GPU process as a CommandEncoderAction. All are silently ignored
+// once the encoder has been invalidated (e.g. after Finish()).
+void CommandEncoder::CopyBufferToBuffer(const Buffer& aSource,
+ BufferAddress aSourceOffset,
+ const Buffer& aDestination,
+ BufferAddress aDestinationOffset,
+ BufferAddress aSize) {
+ if (mValid) {
+ ipc::ByteBuf bb;
+ ffi::wgpu_command_encoder_copy_buffer_to_buffer(
+ aSource.mId, aSourceOffset, aDestination.mId, aDestinationOffset, aSize,
+ ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+ }
+}
+
+// Records a buffer-to-texture copy (e.g. texture uploads).
+void CommandEncoder::CopyBufferToTexture(
+ const dom::GPUBufferCopyView& aSource,
+ const dom::GPUTextureCopyView& aDestination,
+ const dom::GPUExtent3D& aCopySize) {
+ if (mValid) {
+ ipc::ByteBuf bb;
+ ffi::wgpu_command_encoder_copy_buffer_to_texture(
+ ConvertBufferCopyView(aSource), ConvertTextureCopyView(aDestination),
+ ConvertExtent(aCopySize), ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+ }
+}
+// Records a texture-to-buffer copy (e.g. readbacks).
+void CommandEncoder::CopyTextureToBuffer(
+ const dom::GPUTextureCopyView& aSource,
+ const dom::GPUBufferCopyView& aDestination,
+ const dom::GPUExtent3D& aCopySize) {
+ if (mValid) {
+ ipc::ByteBuf bb;
+ ffi::wgpu_command_encoder_copy_texture_to_buffer(
+ ConvertTextureCopyView(aSource), ConvertBufferCopyView(aDestination),
+ ConvertExtent(aCopySize), ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+ }
+}
+// Records a texture-to-texture copy.
+void CommandEncoder::CopyTextureToTexture(
+ const dom::GPUTextureCopyView& aSource,
+ const dom::GPUTextureCopyView& aDestination,
+ const dom::GPUExtent3D& aCopySize) {
+ if (mValid) {
+ ipc::ByteBuf bb;
+ ffi::wgpu_command_encoder_copy_texture_to_texture(
+ ConvertTextureCopyView(aSource), ConvertTextureCopyView(aDestination),
+ ConvertExtent(aCopySize), ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+ }
+}
+
+// Starts recording a compute pass; the returned encoder keeps this
+// CommandEncoder alive as its parent.
+already_AddRefed<ComputePassEncoder> CommandEncoder::BeginComputePass(
+ const dom::GPUComputePassDescriptor& aDesc) {
+ RefPtr<ComputePassEncoder> pass = new ComputePassEncoder(this, aDesc);
+ return pass.forget();
+}
+
+// Starts recording a render pass. As a side effect, remembers the first
+// canvas any color attachment renders to, so that the finished command
+// buffer can invalidate it on submission. Only a single target canvas per
+// encoder is supported for now (see the TODO on mTargetCanvasElement).
+already_AddRefed<RenderPassEncoder> CommandEncoder::BeginRenderPass(
+ const dom::GPURenderPassDescriptor& aDesc) {
+ for (const auto& at : aDesc.mColorAttachments) {
+ auto* targetCanvasElement = at.mAttachment->GetTargetCanvasElement();
+ if (targetCanvasElement) {
+ if (mTargetCanvasElement) {
+ NS_WARNING("Command encoder touches more than one canvas");
+ } else {
+ mTargetCanvasElement = targetCanvasElement;
+ }
+ }
+ }
+
+ RefPtr<RenderPassEncoder> pass = new RenderPassEncoder(this, aDesc);
+ return pass.forget();
+}
+
+// Called by ComputePassEncoder::EndPass: serializes the finished pass into
+// a ByteBuf and ships it to the GPU process. Throws InvalidStateError if
+// the encoder was already finished or destroyed.
+void CommandEncoder::EndComputePass(ffi::WGPUComputePass& aPass,
+ ErrorResult& aRv) {
+ if (!mValid) {
+ return aRv.ThrowInvalidStateError("Command encoder is not valid");
+ }
+
+ ipc::ByteBuf byteBuf;
+ ffi::wgpu_compute_pass_finish(&aPass, ToFFI(&byteBuf));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(byteBuf));
+}
+
+// Render-pass counterpart of EndComputePass, called by
+// RenderPassEncoder::EndPass.
+void CommandEncoder::EndRenderPass(ffi::WGPURenderPass& aPass,
+ ErrorResult& aRv) {
+ if (!mValid) {
+ return aRv.ThrowInvalidStateError("Command encoder is not valid");
+ }
+
+ ipc::ByteBuf byteBuf;
+ ffi::wgpu_render_pass_finish(&aPass, ToFFI(&byteBuf));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(byteBuf));
+}
+
+// Finishes recording and returns the resulting GPUCommandBuffer. The
+// encoder is invalidated here, so further recording calls become no-ops.
+// If the encoder was already invalid, a CommandBuffer with id 0 is still
+// returned (an error object rather than a null return).
+already_AddRefed<CommandBuffer> CommandEncoder::Finish(
+ const dom::GPUCommandBufferDescriptor& aDesc) {
+ RawId id = 0;
+ if (mValid) {
+ mValid = false;
+ id = mBridge->CommandEncoderFinish(mId, mParent->mId, aDesc);
+ }
+ RefPtr<CommandBuffer> comb =
+ new CommandBuffer(mParent, id, mTargetCanvasElement);
+ return comb.forget();
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/CommandEncoder.h b/dom/webgpu/CommandEncoder.h
new file mode 100644
index 0000000000..ca04809db6
--- /dev/null
+++ b/dom/webgpu/CommandEncoder.h
@@ -0,0 +1,102 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_CommandEncoder_H_
+#define GPU_CommandEncoder_H_
+
+#include "mozilla/dom/TypedArray.h"
+#include "mozilla/WeakPtr.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace dom {
+struct GPUComputePassDescriptor;
+struct GPUTextureDataLayout;
+class HTMLCanvasElement;
+template <typename T>
+class Sequence;
+class GPUComputePipelineOrGPURenderPipeline;
+class RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict;
+struct GPUBufferCopyView;
+struct GPUCommandBufferDescriptor;
+struct GPUImageBitmapCopyView;
+struct GPURenderPassDescriptor;
+struct GPUTextureCopyView;
+typedef RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict GPUExtent3D;
+} // namespace dom
+namespace webgpu {
+namespace ffi {
+struct WGPUComputePass;
+struct WGPURenderPass;
+struct WGPUTextureDataLayout;
+struct WGPUTextureCopyView_TextureId;
+struct WGPUExtent3d;
+} // namespace ffi
+
+class BindGroup;
+class Buffer;
+class CommandBuffer;
+class ComputePassEncoder;
+class Device;
+class RenderPassEncoder;
+
+// DOM-facing GPUCommandEncoder. Records copy commands and pass encoders,
+// serializing each action over the WebGPU IPC bridge to the GPU process.
+// NOTE(review): ConvertTextureCopyViewToFFI is declared here with
+// ffi::WGPUTextureCopyView_TextureId while the .cpp definition uses
+// ffi::WGPUTextureCopyView — presumably the latter is a typedef of the
+// former in the generated FFI header; confirm.
+class CommandEncoder final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(CommandEncoder)
+ GPU_DECL_JS_WRAP(CommandEncoder)
+
+ CommandEncoder(Device* const aParent, WebGPUChild* const aBridge, RawId aId);
+
+ // Identifier of the remote command encoder in the GPU process.
+ const RawId mId;
+
+ // WebIDL-union -> FFI conversion helpers, shared with other encoders.
+ static void ConvertTextureDataLayoutToFFI(
+ const dom::GPUTextureDataLayout& aLayout,
+ ffi::WGPUTextureDataLayout* aLayoutFFI);
+ static void ConvertTextureCopyViewToFFI(
+ const dom::GPUTextureCopyView& aView,
+ ffi::WGPUTextureCopyView_TextureId* aViewFFI);
+ static void ConvertExtent3DToFFI(const dom::GPUExtent3D& aExtent,
+ ffi::WGPUExtent3d* aExtentFFI);
+
+ private:
+ ~CommandEncoder();
+ void Cleanup();
+
+ RefPtr<WebGPUChild> mBridge;
+ // TODO: support multiple target canvases per command encoder
+ WeakPtr<dom::HTMLCanvasElement> mTargetCanvasElement;
+
+ public:
+ // Invoked by the pass encoders when their EndPass() is called.
+ void EndComputePass(ffi::WGPUComputePass& aPass, ErrorResult& aRv);
+ void EndRenderPass(ffi::WGPURenderPass& aPass, ErrorResult& aRv);
+
+ void CopyBufferToBuffer(const Buffer& aSource, BufferAddress aSourceOffset,
+ const Buffer& aDestination,
+ BufferAddress aDestinationOffset,
+ BufferAddress aSize);
+ void CopyBufferToTexture(const dom::GPUBufferCopyView& aSource,
+ const dom::GPUTextureCopyView& aDestination,
+ const dom::GPUExtent3D& aCopySize);
+ void CopyTextureToBuffer(const dom::GPUTextureCopyView& aSource,
+ const dom::GPUBufferCopyView& aDestination,
+ const dom::GPUExtent3D& aCopySize);
+ void CopyTextureToTexture(const dom::GPUTextureCopyView& aSource,
+ const dom::GPUTextureCopyView& aDestination,
+ const dom::GPUExtent3D& aCopySize);
+
+ already_AddRefed<ComputePassEncoder> BeginComputePass(
+ const dom::GPUComputePassDescriptor& aDesc);
+ already_AddRefed<RenderPassEncoder> BeginRenderPass(
+ const dom::GPURenderPassDescriptor& aDesc);
+ // Invalidates the encoder; see CommandEncoder.cpp for the id-0 fallback.
+ already_AddRefed<CommandBuffer> Finish(
+ const dom::GPUCommandBufferDescriptor& aDesc);
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_CommandEncoder_H_
diff --git a/dom/webgpu/ComputePassEncoder.cpp b/dom/webgpu/ComputePassEncoder.cpp
new file mode 100644
index 0000000000..cbc6384597
--- /dev/null
+++ b/dom/webgpu/ComputePassEncoder.cpp
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "ComputePassEncoder.h"
+#include "BindGroup.h"
+#include "ComputePipeline.h"
+#include "CommandEncoder.h"
+
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(ComputePassEncoder, mParent, mUsedBindGroups,
+ mUsedPipelines)
+GPU_IMPL_JS_WRAP(ComputePassEncoder)
+
+// mozilla::Scoped<> traits for the raw FFI compute pass: "empty" is null,
+// and release destroys the pass through the wgpu FFI.
+ffi::WGPUComputePass* ScopedFfiComputeTraits::empty() { return nullptr; }
+
+void ScopedFfiComputeTraits::release(ffi::WGPUComputePass* raw) {
+ if (raw) {
+ ffi::wgpu_compute_pass_destroy(raw);
+ }
+}
+
+// File-local helper that opens a raw FFI compute pass on the given encoder.
+// The descriptor is currently unused because GPUComputePassDescriptor has
+// no fields the FFI consumes.
+ffi::WGPUComputePass* BeginComputePass(
+ RawId aEncoderId, const dom::GPUComputePassDescriptor& aDesc) {
+ ffi::WGPUComputePassDescriptor desc = {};
+ Unused << aDesc; // no useful fields
+ return ffi::wgpu_command_encoder_begin_compute_pass(aEncoderId, &desc);
+}
+
+// Opens the underlying FFI compute pass immediately; mPass (a Scoped) owns
+// it until EndPass() transfers ownership to the parent CommandEncoder.
+ComputePassEncoder::ComputePassEncoder(
+ CommandEncoder* const aParent, const dom::GPUComputePassDescriptor& aDesc)
+ : ChildOf(aParent), mPass(BeginComputePass(aParent->mId, aDesc)) {}
+
+// If the pass was never ended, mPass's Scoped destructor releases the raw
+// FFI pass; only the validity flag needs flipping here.
+ComputePassEncoder::~ComputePassEncoder() {
+ if (mValid) {
+ mValid = false;
+ }
+}
+
+// Records a bind-group binding into the pass. The bind group is added to
+// mUsedBindGroups so it stays alive until the pass is finished.
+void ComputePassEncoder::SetBindGroup(
+ uint32_t aSlot, const BindGroup& aBindGroup,
+ const dom::Sequence<uint32_t>& aDynamicOffsets) {
+ if (mValid) {
+ mUsedBindGroups.AppendElement(&aBindGroup);
+ ffi::wgpu_compute_pass_set_bind_group(mPass, aSlot, aBindGroup.mId,
+ aDynamicOffsets.Elements(),
+ aDynamicOffsets.Length());
+ }
+}
+
+// Records the active compute pipeline, keeping it alive via mUsedPipelines.
+void ComputePassEncoder::SetPipeline(const ComputePipeline& aPipeline) {
+ if (mValid) {
+ mUsedPipelines.AppendElement(&aPipeline);
+ ffi::wgpu_compute_pass_set_pipeline(mPass, aPipeline.mId);
+ }
+}
+
+// Records a dispatch of x*y*z workgroups.
+void ComputePassEncoder::Dispatch(uint32_t x, uint32_t y, uint32_t z) {
+ if (mValid) {
+ ffi::wgpu_compute_pass_dispatch(mPass, x, y, z);
+ }
+}
+
+// Records a dispatch whose workgroup counts are read from a GPU buffer.
+void ComputePassEncoder::DispatchIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset) {
+ if (mValid) {
+ ffi::wgpu_compute_pass_dispatch_indirect(mPass, aIndirectBuffer.mId,
+ aIndirectOffset);
+ }
+}
+
+// Closes the pass: ownership of the raw FFI pass is taken out of the Scoped
+// and handed to the parent CommandEncoder for serialization.
+// NOTE(review): if the parent encoder is already invalid, EndComputePass
+// throws without consuming the forgotten pass — looks like the raw pass
+// would leak on that path; confirm against the FFI ownership rules.
+void ComputePassEncoder::EndPass(ErrorResult& aRv) {
+ if (mValid) {
+ mValid = false;
+ auto* pass = mPass.forget();
+ MOZ_ASSERT(pass);
+ mParent->EndComputePass(*pass, aRv);
+ }
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ComputePassEncoder.h b/dom/webgpu/ComputePassEncoder.h
new file mode 100644
index 0000000000..04828620de
--- /dev/null
+++ b/dom/webgpu/ComputePassEncoder.h
@@ -0,0 +1,61 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_ComputePassEncoder_H_
+#define GPU_ComputePassEncoder_H_
+
+#include "mozilla/Scoped.h"
+#include "mozilla/dom/TypedArray.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace webgpu {
+namespace ffi {
+struct WGPUComputePass;
+} // namespace ffi
+
+class BindGroup;
+class Buffer;
+class CommandEncoder;
+class ComputePipeline;
+
+// Scoped<> traits so the raw FFI compute pass is destroyed automatically if
+// the DOM encoder is dropped without EndPass().
+struct ScopedFfiComputeTraits {
+ typedef ffi::WGPUComputePass* type;
+ static type empty();
+ static void release(type raw);
+};
+
+// DOM-facing GPUComputePassEncoder. Commands are recorded into an
+// in-process FFI pass object and only serialized to the GPU process when
+// the pass ends.
+class ComputePassEncoder final : public ObjectBase,
+ public ChildOf<CommandEncoder> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(ComputePassEncoder)
+ GPU_DECL_JS_WRAP(ComputePassEncoder)
+
+ ComputePassEncoder(CommandEncoder* const aParent,
+ const dom::GPUComputePassDescriptor& aDesc);
+
+ private:
+ virtual ~ComputePassEncoder();
+ void Cleanup() {}
+
+ // Owns the raw FFI pass until EndPass() forgets it to the parent.
+ Scoped<ScopedFfiComputeTraits> mPass;
+ // keep all the used objects alive while the pass is recorded
+ nsTArray<RefPtr<const BindGroup>> mUsedBindGroups;
+ nsTArray<RefPtr<const ComputePipeline>> mUsedPipelines;
+
+ public:
+ void SetBindGroup(uint32_t aSlot, const BindGroup& aBindGroup,
+ const dom::Sequence<uint32_t>& aDynamicOffsets);
+ void SetPipeline(const ComputePipeline& aPipeline);
+ void Dispatch(uint32_t x, uint32_t y, uint32_t z);
+ void DispatchIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset);
+ void EndPass(ErrorResult& aRv);
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_ComputePassEncoder_H_
diff --git a/dom/webgpu/ComputePipeline.cpp b/dom/webgpu/ComputePipeline.cpp
new file mode 100644
index 0000000000..6dbaf5118f
--- /dev/null
+++ b/dom/webgpu/ComputePipeline.cpp
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ComputePipeline.h"
+
+#include "Device.h"
+#include "ipc/WebGPUChild.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(ComputePipeline, mParent)
+GPU_IMPL_JS_WRAP(ComputePipeline)
+
+// Takes ownership of the bind-group-layout ids the GPU process created
+// implicitly for this pipeline (used by GetBindGroupLayout).
+ComputePipeline::ComputePipeline(Device* const aParent, RawId aId,
+ nsTArray<RawId>&& aImplicitBindGroupLayoutIds)
+ : ChildOf(aParent),
+ mImplicitBindGroupLayoutIds(std::move(aImplicitBindGroupLayoutIds)),
+ mId(aId) {}
+
+ComputePipeline::~ComputePipeline() { Cleanup(); }
+
+// Invalidates the pipeline and destroys its GPU-process counterpart if the
+// bridge is still open. Idempotent via the mValid guard.
+void ComputePipeline::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendComputePipelineDestroy(mId);
+ }
+ }
+}
+
+// Wraps the implicit bind group layout at `index` in a fresh DOM object.
+// NOTE(review): `index` is not range-checked against
+// mImplicitBindGroupLayoutIds.Length(); an out-of-range WebIDL index would
+// assert/UB here — confirm the binding layer validates it.
+already_AddRefed<BindGroupLayout> ComputePipeline::GetBindGroupLayout(
+ uint32_t index) const {
+ RefPtr<BindGroupLayout> object =
+ new BindGroupLayout(mParent, mImplicitBindGroupLayoutIds[index]);
+ return object.forget();
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ComputePipeline.h b/dom/webgpu/ComputePipeline.h
new file mode 100644
index 0000000000..75de8e03c6
--- /dev/null
+++ b/dom/webgpu/ComputePipeline.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_ComputePipeline_H_
+#define GPU_ComputePipeline_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsTArray.h"
+
+namespace mozilla {
+namespace webgpu {
+
+class BindGroupLayout;
+class Device;
+
+// DOM-facing GPUComputePipeline: a RawId handle plus the ids of the
+// bind group layouts the implementation created implicitly for it.
+class ComputePipeline final : public ObjectBase, public ChildOf<Device> {
+ const nsTArray<RawId> mImplicitBindGroupLayoutIds;
+
+ public:
+ GPU_DECL_CYCLE_COLLECTION(ComputePipeline)
+ GPU_DECL_JS_WRAP(ComputePipeline)
+
+ const RawId mId;
+
+ ComputePipeline(Device* const aParent, RawId aId,
+ nsTArray<RawId>&& aImplicitBindGroupLayoutIds);
+ // Returns a DOM wrapper for the implicit layout at `index`.
+ already_AddRefed<BindGroupLayout> GetBindGroupLayout(uint32_t index) const;
+
+ private:
+ ~ComputePipeline();
+ void Cleanup();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_ComputePipeline_H_
diff --git a/dom/webgpu/Device.cpp b/dom/webgpu/Device.cpp
new file mode 100644
index 0000000000..b43bd3a3f3
--- /dev/null
+++ b/dom/webgpu/Device.cpp
@@ -0,0 +1,244 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/ArrayBuffer.h"
+#include "js/Value.h"
+#include "mozilla/ErrorResult.h"
+#include "mozilla/Logging.h"
+#include "mozilla/ipc/Shmem.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "Device.h"
+#include "CommandEncoder.h"
+#include "BindGroup.h"
+
+#include "Adapter.h"
+#include "Buffer.h"
+#include "ComputePipeline.h"
+#include "Queue.h"
+#include "RenderPipeline.h"
+#include "Sampler.h"
+#include "Texture.h"
+#include "TextureView.h"
+#include "ipc/WebGPUChild.h"
+
+namespace mozilla {
+namespace webgpu {
+
+mozilla::LazyLogModule gWebGPULog("WebGPU");
+
+NS_IMPL_CYCLE_COLLECTION_INHERITED(Device, DOMEventTargetHelper, mBridge,
+ mQueue)
+NS_IMPL_ISUPPORTS_CYCLE_COLLECTION_INHERITED_0(Device, DOMEventTargetHelper)
+GPU_IMPL_JS_WRAP(Device)
+
+// Free callback for the external ArrayBuffer below. Intentionally empty:
+// the mapping memory is backed by an ipc::Shmem whose lifetime is managed
+// separately — presumably by the Buffer/unmap path; confirm there is no
+// use-after-free window if JS holds the ArrayBuffer past unmap.
+static void mapFreeCallback(void* aContents, void* aUserData) {
+ Unused << aContents;
+ Unused << aUserData;
+}
+
+RefPtr<WebGPUChild> Device::GetBridge() { return mBridge; }
+
+// Exposes a window [aOffset, aOffset+aSize) of the mapping Shmem to JS as
+// an external ArrayBuffer (no copy; the buffer aliases the shared memory).
+JSObject* Device::CreateExternalArrayBuffer(JSContext* aCx, size_t aOffset,
+ size_t aSize,
+ const ipc::Shmem& aShmem) {
+ MOZ_ASSERT(aOffset + aSize <= aShmem.Size<uint8_t>());
+ return JS::NewExternalArrayBuffer(aCx, aSize, aShmem.get<uint8_t>() + aOffset,
+ &mapFreeCallback, nullptr);
+}
+
+// A Device shares the RawId of its default Queue (both are created from the
+// same id) and registers itself with the bridge so incoming messages (e.g.
+// device-lost / error events) can be routed back to it.
+Device::Device(Adapter* const aParent, RawId aId)
+ : DOMEventTargetHelper(aParent->GetParentObject()),
+ mId(aId),
+ mBridge(aParent->mBridge),
+ mQueue(new Queue(this, aParent->mBridge, aId)) {
+ mBridge->RegisterDevice(mId, this);
+}
+
+Device::~Device() { Cleanup(); }
+
+// Unregisters from the bridge once; no-op when already invalid or when the
+// IPC channel has gone away.
+void Device::Cleanup() {
+ if (mValid && mBridge && mBridge->IsOpen()) {
+ mValid = false;
+ mBridge->UnregisterDevice(mId);
+ }
+}
+
+void Device::GetLabel(nsAString& aValue) const { aValue = mLabel; }
+void Device::SetLabel(const nsAString& aLabel) { mLabel = aLabel; }
+
+Queue* Device::DefaultQueue() const { return mQueue; }
+
+// Creates a GPUBuffer. For mappable buffers (MAP_READ/MAP_WRITE usage or
+// mappedAtCreation) a zero-filled Shmem of the full size is allocated up
+// front to back the CPU-visible mapping. Throws RangeError if the size
+// doesn't fit in size_t and AbortError if shmem allocation fails.
+already_AddRefed<Buffer> Device::CreateBuffer(
+ const dom::GPUBufferDescriptor& aDesc, ErrorResult& aRv) {
+ ipc::Shmem shmem;
+ bool hasMapFlags = aDesc.mUsage & (dom::GPUBufferUsage_Binding::MAP_WRITE |
+ dom::GPUBufferUsage_Binding::MAP_READ);
+ if (hasMapFlags || aDesc.mMappedAtCreation) {
+ const auto checked = CheckedInt<size_t>(aDesc.mSize);
+ if (!checked.isValid()) {
+ aRv.ThrowRangeError("Mappable size is too large");
+ return nullptr;
+ }
+ const auto& size = checked.value();
+
+ // TODO: use `ShmemPool`?
+ if (!mBridge->AllocShmem(size, ipc::Shmem::SharedMemory::TYPE_BASIC,
+ &shmem)) {
+ aRv.ThrowAbortError(
+ nsPrintfCString("Unable to allocate shmem of size %" PRIuPTR, size));
+ return nullptr;
+ }
+
+ // zero out memory
+ memset(shmem.get<uint8_t>(), 0, size);
+ }
+
+ // If the buffer is not mapped at creation, and it has Shmem, we send it
+ // to the GPU process. Otherwise, we keep it.
+ RawId id = mBridge->DeviceCreateBuffer(mId, aDesc);
+ if (hasMapFlags && !aDesc.mMappedAtCreation) {
+ mBridge->SendBufferReturnShmem(id, std::move(shmem));
+ }
+ RefPtr<Buffer> buffer = new Buffer(this, id, aDesc.mSize);
+
+ if (aDesc.mMappedAtCreation) {
+ // mappedAtCreation mappings are writable unless the buffer is
+ // MAP_READ-only.
+ buffer->SetMapped(std::move(shmem),
+ !(aDesc.mUsage & dom::GPUBufferUsage_Binding::MAP_READ));
+ }
+
+ return buffer.forget();
+}
+
+// Issues the async buffer-map request over IPC. aMode must be exactly
+// GPUMapMode.READ or GPUMapMode.WRITE; anything else (including a
+// combination) throws InvalidAccessError. Returns the IPC promise that
+// resolves with the mapping Shmem, or nullptr on a thrown error.
+RefPtr<MappingPromise> Device::MapBufferAsync(RawId aId, uint32_t aMode,
+ size_t aOffset, size_t aSize,
+ ErrorResult& aRv) {
+ ffi::WGPUHostMap mode;
+ switch (aMode) {
+ case dom::GPUMapMode_Binding::READ:
+ mode = ffi::WGPUHostMap_Read;
+ break;
+ case dom::GPUMapMode_Binding::WRITE:
+ mode = ffi::WGPUHostMap_Write;
+ break;
+ default:
+ aRv.ThrowInvalidAccessError(
+ nsPrintfCString("Invalid map flag %u", aMode));
+ return nullptr;
+ }
+
+ const CheckedInt<uint64_t> offset(aOffset);
+ if (!offset.isValid()) {
+ aRv.ThrowRangeError("Mapped offset is too large");
+ return nullptr;
+ }
+ const CheckedInt<uint64_t> size(aSize);
+ if (!size.isValid()) {
+ aRv.ThrowRangeError("Mapped size is too large");
+ return nullptr;
+ }
+
+ return mBridge->SendBufferMap(aId, mode, offset.value(), size.value());
+}
+
+// Returns the mapping Shmem to the GPU process; aFlush indicates whether
+// CPU-side writes need to be uploaded to the GPU copy.
+void Device::UnmapBuffer(RawId aId, ipc::Shmem&& aShmem, bool aFlush) {
+ mBridge->SendBufferUnmap(aId, std::move(aShmem), aFlush);
+}
+
+// The Create* methods below all follow the same shape: ask the bridge to
+// create the remote object (which returns its RawId), then wrap the id in
+// the corresponding DOM object parented to this Device.
+already_AddRefed<Texture> Device::CreateTexture(
+ const dom::GPUTextureDescriptor& aDesc) {
+ RawId id = mBridge->DeviceCreateTexture(mId, aDesc);
+ RefPtr<Texture> texture = new Texture(this, id, aDesc);
+ return texture.forget();
+}
+
+already_AddRefed<Sampler> Device::CreateSampler(
+ const dom::GPUSamplerDescriptor& aDesc) {
+ RawId id = mBridge->DeviceCreateSampler(mId, aDesc);
+ RefPtr<Sampler> sampler = new Sampler(this, id);
+ return sampler.forget();
+}
+
+already_AddRefed<CommandEncoder> Device::CreateCommandEncoder(
+ const dom::GPUCommandEncoderDescriptor& aDesc) {
+ RawId id = mBridge->DeviceCreateCommandEncoder(mId, aDesc);
+ RefPtr<CommandEncoder> encoder = new CommandEncoder(this, mBridge, id);
+ return encoder.forget();
+}
+
+already_AddRefed<BindGroupLayout> Device::CreateBindGroupLayout(
+ const dom::GPUBindGroupLayoutDescriptor& aDesc) {
+ RawId id = mBridge->DeviceCreateBindGroupLayout(mId, aDesc);
+ RefPtr<BindGroupLayout> object = new BindGroupLayout(this, id);
+ return object.forget();
+}
+already_AddRefed<PipelineLayout> Device::CreatePipelineLayout(
+ const dom::GPUPipelineLayoutDescriptor& aDesc) {
+ RawId id = mBridge->DeviceCreatePipelineLayout(mId, aDesc);
+ RefPtr<PipelineLayout> object = new PipelineLayout(this, id);
+ return object.forget();
+}
+already_AddRefed<BindGroup> Device::CreateBindGroup(
+ const dom::GPUBindGroupDescriptor& aDesc) {
+ RawId id = mBridge->DeviceCreateBindGroup(mId, aDesc);
+ RefPtr<BindGroup> object = new BindGroup(this, id);
+ return object.forget();
+}
+
+// Only SPIR-V input is accepted for now; a WGSL string yields nullptr.
+already_AddRefed<ShaderModule> Device::CreateShaderModule(
+ const dom::GPUShaderModuleDescriptor& aDesc) {
+ if (aDesc.mCode.IsString()) {
+ // we don't yet support WGSL
+ return nullptr;
+ }
+ RawId id = mBridge->DeviceCreateShaderModule(mId, aDesc);
+ RefPtr<ShaderModule> object = new ShaderModule(this, id);
+ return object.forget();
+}
+
+// Pipeline creation additionally receives, via out-parameter, the ids of
+// the bind group layouts the implementation derived implicitly; those are
+// handed to the DOM pipeline for getBindGroupLayout().
+already_AddRefed<ComputePipeline> Device::CreateComputePipeline(
+ const dom::GPUComputePipelineDescriptor& aDesc) {
+ nsTArray<RawId> implicitBindGroupLayoutIds;
+ RawId id = mBridge->DeviceCreateComputePipeline(mId, aDesc,
+ &implicitBindGroupLayoutIds);
+ RefPtr<ComputePipeline> object =
+ new ComputePipeline(this, id, std::move(implicitBindGroupLayoutIds));
+ return object.forget();
+}
+
+already_AddRefed<RenderPipeline> Device::CreateRenderPipeline(
+ const dom::GPURenderPipelineDescriptor& aDesc) {
+ nsTArray<RawId> implicitBindGroupLayoutIds;
+ RawId id = mBridge->DeviceCreateRenderPipeline(mId, aDesc,
+ &implicitBindGroupLayoutIds);
+ RefPtr<RenderPipeline> object =
+ new RenderPipeline(this, id, std::move(implicitBindGroupLayoutIds));
+ return object.forget();
+}
+
+// Sets up the presentation machinery for a canvas swap chain: registers the
+// swap chain (with its WebRender external-image id and RGB descriptor) on
+// the GPU process, then creates the 2D texture that will be rendered into.
+// COPY_SRC is forced into the usage so frames can be read back for
+// presentation.
+already_AddRefed<Texture> Device::InitSwapChain(
+ const dom::GPUSwapChainDescriptor& aDesc,
+ const dom::GPUExtent3DDict& aExtent3D, wr::ExternalImageId aExternalImageId,
+ gfx::SurfaceFormat aFormat) {
+ const layers::RGBDescriptor rgbDesc(
+ gfx::IntSize(AssertedCast<int>(aExtent3D.mWidth),
+ AssertedCast<int>(aExtent3D.mHeight)),
+ aFormat, false);
+ // buffer count doesn't matter much, will be created on demand
+ const size_t maxBufferCount = 10;
+ mBridge->DeviceCreateSwapChain(mId, rgbDesc, maxBufferCount,
+ aExternalImageId);
+
+ dom::GPUTextureDescriptor desc;
+ desc.mDimension = dom::GPUTextureDimension::_2d;
+ desc.mSize.SetAsGPUExtent3DDict() = aExtent3D;
+ desc.mFormat = aDesc.mFormat;
+ desc.mMipLevelCount = 1;
+ desc.mSampleCount = 1;
+ desc.mUsage = aDesc.mUsage | dom::GPUTextureUsage_Binding::COPY_SRC;
+ return CreateTexture(desc);
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/Device.h b/dom/webgpu/Device.h
new file mode 100644
index 0000000000..c55bacd5ba
--- /dev/null
+++ b/dom/webgpu/Device.h
@@ -0,0 +1,141 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_DEVICE_H_
+#define GPU_DEVICE_H_
+
+#include "mozilla/MozPromise.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "mozilla/webrender/WebRenderAPI.h"
+#include "mozilla/DOMEventTargetHelper.h"
+
+namespace mozilla {
+namespace dom {
+struct GPUExtensions;
+struct GPUFeatures;
+struct GPULimits;
+struct GPUExtent3DDict;
+
+struct GPUBufferDescriptor;
+struct GPUTextureDescriptor;
+struct GPUSamplerDescriptor;
+struct GPUBindGroupLayoutDescriptor;
+struct GPUPipelineLayoutDescriptor;
+struct GPUBindGroupDescriptor;
+struct GPUBlendStateDescriptor;
+struct GPUDepthStencilStateDescriptor;
+struct GPUInputStateDescriptor;
+struct GPUShaderModuleDescriptor;
+struct GPUAttachmentStateDescriptor;
+struct GPUComputePipelineDescriptor;
+struct GPURenderBundleEncoderDescriptor;
+struct GPURenderPipelineDescriptor;
+struct GPUCommandEncoderDescriptor;
+struct GPUSwapChainDescriptor;
+
+class EventHandlerNonNull;
+class Promise;
+template <typename T>
+class Sequence;
+class GPUBufferOrGPUTexture;
+enum class GPUErrorFilter : uint8_t;
+class GPULogCallback;
+} // namespace dom
+namespace ipc {
+enum class ResponseRejectReason;
+class Shmem;
+} // namespace ipc
+
+namespace webgpu {
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandEncoder;
+class ComputePipeline;
+class Fence;
+class InputState;
+class PipelineLayout;
+class Queue;
+class RenderBundleEncoder;
+class RenderPipeline;
+class Sampler;
+class ShaderModule;
+class Texture;
+class WebGPUChild;
+
+typedef MozPromise<ipc::Shmem, ipc::ResponseRejectReason, true> MappingPromise;
+
+// DOM-facing GPUDevice. Unlike most webgpu DOM objects it is an event
+// target (for uncapturederror) rather than an ObjectBase/ChildOf wrapper.
+// Owns the IPC bridge reference and the default Queue, and is the factory
+// for every other device-scoped resource.
+class Device final : public DOMEventTargetHelper {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(Device, DOMEventTargetHelper)
+ GPU_DECL_JS_WRAP(Device)
+
+ // Identifier of the remote device in the GPU process; also reused as the
+ // id of the default queue (see the constructor).
+ const RawId mId;
+
+ explicit Device(Adapter* const aParent, RawId aId);
+
+ RefPtr<WebGPUChild> GetBridge();
+ // Wraps a window of a mapping Shmem as a zero-copy external ArrayBuffer.
+ static JSObject* CreateExternalArrayBuffer(JSContext* aCx, size_t aOffset,
+ size_t aSize,
+ const ipc::Shmem& aShmem);
+ // Buffer-mapping plumbing used by Buffer; see Device.cpp for semantics.
+ RefPtr<MappingPromise> MapBufferAsync(RawId aId, uint32_t aMode,
+ size_t aOffset, size_t aSize,
+ ErrorResult& aRv);
+ void UnmapBuffer(RawId aId, ipc::Shmem&& aShmem, bool aFlush);
+ already_AddRefed<Texture> InitSwapChain(
+ const dom::GPUSwapChainDescriptor& aDesc,
+ const dom::GPUExtent3DDict& aExtent3D,
+ wr::ExternalImageId aExternalImageId, gfx::SurfaceFormat aFormat);
+
+ private:
+ ~Device();
+ void Cleanup();
+
+ RefPtr<WebGPUChild> mBridge;
+ // Cleared by Cleanup(); guards the one-time bridge unregistration.
+ bool mValid = true;
+ nsString mLabel;
+ RefPtr<Queue> mQueue;
+
+ public:
+ void GetLabel(nsAString& aValue) const;
+ void SetLabel(const nsAString& aLabel);
+
+ Queue* DefaultQueue() const;
+
+ already_AddRefed<Buffer> CreateBuffer(const dom::GPUBufferDescriptor& aDesc,
+ ErrorResult& aRv);
+
+ already_AddRefed<Texture> CreateTexture(
+ const dom::GPUTextureDescriptor& aDesc);
+ already_AddRefed<Sampler> CreateSampler(
+ const dom::GPUSamplerDescriptor& aDesc);
+
+ already_AddRefed<CommandEncoder> CreateCommandEncoder(
+ const dom::GPUCommandEncoderDescriptor& aDesc);
+
+ already_AddRefed<BindGroupLayout> CreateBindGroupLayout(
+ const dom::GPUBindGroupLayoutDescriptor& aDesc);
+ already_AddRefed<PipelineLayout> CreatePipelineLayout(
+ const dom::GPUPipelineLayoutDescriptor& aDesc);
+ already_AddRefed<BindGroup> CreateBindGroup(
+ const dom::GPUBindGroupDescriptor& aDesc);
+
+ already_AddRefed<ShaderModule> CreateShaderModule(
+ const dom::GPUShaderModuleDescriptor& aDesc);
+ already_AddRefed<ComputePipeline> CreateComputePipeline(
+ const dom::GPUComputePipelineDescriptor& aDesc);
+ already_AddRefed<RenderPipeline> CreateRenderPipeline(
+ const dom::GPURenderPipelineDescriptor& aDesc);
+
+ IMPL_EVENT_HANDLER(uncapturederror)
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_DEVICE_H_
diff --git a/dom/webgpu/DeviceLostInfo.cpp b/dom/webgpu/DeviceLostInfo.cpp
new file mode 100644
index 0000000000..602938ffee
--- /dev/null
+++ b/dom/webgpu/DeviceLostInfo.cpp
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "DeviceLostInfo.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(DeviceLostInfo, mParent)
+GPU_IMPL_JS_WRAP(DeviceLostInfo)
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/DeviceLostInfo.h b/dom/webgpu/DeviceLostInfo.h
new file mode 100644
index 0000000000..fda8fa5093
--- /dev/null
+++ b/dom/webgpu/DeviceLostInfo.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_DeviceLostInfo_H_
+#define GPU_DeviceLostInfo_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace webgpu {
+class Device;
+
// DOM object handed to script when a GPUDevice is lost. Currently a stub:
// it carries no state and its message accessor yields nothing.
class DeviceLostInfo final : public nsWrapperCache, public ChildOf<Device> {
 public:
  GPU_DECL_CYCLE_COLLECTION(DeviceLostInfo)
  GPU_DECL_JS_WRAP(DeviceLostInfo)

 private:
  DeviceLostInfo() = delete;  // must be created with a parent device
  ~DeviceLostInfo() = default;
  void Cleanup() {}  // required by the cycle-collector unlink macro

 public:
  // WebIDL: GPUDeviceLostInfo.message. NOTE(review): leaves aValue untouched,
  // so script currently observes an empty message string.
  void GetMessage(nsAString& aValue) const {}
};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_DeviceLostInfo_H_
diff --git a/dom/webgpu/Fence.cpp b/dom/webgpu/Fence.cpp
new file mode 100644
index 0000000000..5ac0293942
--- /dev/null
+++ b/dom/webgpu/Fence.cpp
@@ -0,0 +1,17 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Fence.h"
+
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla {
+namespace webgpu {
+
// Cycle-collection (traces mParent) and WebIDL wrapper boilerplate for Fence.
GPU_IMPL_CYCLE_COLLECTION(Fence, mParent)
GPU_IMPL_JS_WRAP(Fence)
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/Fence.h b/dom/webgpu/Fence.h
new file mode 100644
index 0000000000..65507b9341
--- /dev/null
+++ b/dom/webgpu/Fence.h
@@ -0,0 +1,36 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_Fence_H_
+#define GPU_Fence_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace dom {
+class Promise;
+} // namespace dom
+namespace webgpu {
+
+class Device;
+
// Skeleton GPUFence object; no signaling/value API is exposed yet.
class Fence final : public ObjectBase, public ChildOf<Device> {
 public:
  GPU_DECL_CYCLE_COLLECTION(Fence)
  GPU_DECL_JS_WRAP(Fence)

 private:
  Fence() = delete;  // only created by its parent Device
  ~Fence() = default;
  void Cleanup() {}  // required by the cycle-collector unlink macro

 public:
};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_Fence_H_
diff --git a/dom/webgpu/Instance.cpp b/dom/webgpu/Instance.cpp
new file mode 100644
index 0000000000..9eb53b136d
--- /dev/null
+++ b/dom/webgpu/Instance.cpp
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Instance.h"
+
+#include "Adapter.h"
+#include "gfxConfig.h"
+#include "nsIGlobalObject.h"
+#include "ipc/WebGPUChild.h"
+#include "ipc/WebGPUTypes.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/layers/CompositorBridgeChild.h"
+
+namespace mozilla {
+namespace webgpu {
+
GPU_IMPL_CYCLE_COLLECTION(Instance, mBridge, mOwner)

/*static*/
// Factory for the per-global `GPU` object. The IPDL bridge is only wired up
// when the WebGPU feature is enabled; otherwise the Instance is created
// bridge-less and RequestAdapter will reject.
already_AddRefed<Instance> Instance::Create(nsIGlobalObject* aOwner) {
  RefPtr<WebGPUChild> bridge;

  if (gfx::gfxConfig::IsEnabled(gfx::Feature::WEBGPU)) {
    bridge = layers::CompositorBridgeChild::Get()->GetWebGPUChild();
    if (NS_WARN_IF(!bridge)) {
      // Feature enabled but no bridge available: unrecoverable.
      MOZ_CRASH("Failed to create an IPDL bridge for WebGPU!");
    }
  }

  RefPtr<Instance> result = new Instance(aOwner, bridge);
  return result.forget();
}
+
Instance::Instance(nsIGlobalObject* aOwner, WebGPUChild* aBridge)
    : mBridge(aBridge), mOwner(aOwner) {}

Instance::~Instance() { Cleanup(); }

// Nothing to tear down yet; kept for the cycle-collector unlink macro.
void Instance::Cleanup() {}

JSObject* Instance::WrapObject(JSContext* cx,
                               JS::Handle<JSObject*> givenProto) {
  // Instance binds to the WebIDL `GPU` interface, hence the manual wrap
  // instead of GPU_IMPL_JS_WRAP (which would expect a `GPUInstance` binding).
  return dom::GPU_Binding::Wrap(cx, this, givenProto);
}
+
// WebIDL: GPU.requestAdapter(). Resolves with a new Adapter on success,
// rejects with InvalidStateError when WebGPU is disabled or no adapter
// matches, and with AbortError on IPC failure.
already_AddRefed<dom::Promise> Instance::RequestAdapter(
    const dom::GPURequestAdapterOptions& aOptions, ErrorResult& aRv) {
  RefPtr<dom::Promise> promise = dom::Promise::Create(mOwner, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }
  if (!mBridge) {
    promise->MaybeRejectWithInvalidStateError("WebGPU is not enabled!");
    return promise.forget();
  }

  // Keep `this` alive in the lambda until the async IPC reply arrives.
  RefPtr<Instance> instance = this;

  mBridge->InstanceRequestAdapter(aOptions)->Then(
      GetMainThreadSerialEventTarget(), __func__,
      [promise, instance](RawId id) {
        MOZ_ASSERT(id != 0);
        RefPtr<Adapter> adapter = new Adapter(instance, id);
        promise->MaybeResolve(adapter);
      },
      [promise](const Maybe<ipc::ResponseRejectReason>& aRv) {
        // Some() => the IPC channel itself failed; Nothing() => the parent
        // process answered but found no suitable adapter.
        if (aRv.isSome()) {
          promise->MaybeRejectWithAbortError("Internal communication error!");
        } else {
          promise->MaybeRejectWithInvalidStateError(
              "No matching adapter found!");
        }
      });

  return promise.forget();
}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/Instance.h b/dom/webgpu/Instance.h
new file mode 100644
index 0000000000..da4da1cac5
--- /dev/null
+++ b/dom/webgpu/Instance.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_INSTANCE_H_
+#define GPU_INSTANCE_H_
+
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/RefPtr.h"
+#include "nsCOMPtr.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+class ErrorResult;
+namespace dom {
+class Promise;
+struct GPURequestAdapterOptions;
+} // namespace dom
+
+namespace webgpu {
+class Adapter;
+class GPUAdapter;
+class WebGPUChild;
+
// Implements the WebIDL `GPU` interface: the per-global entry point that
// hands out adapters over the IPDL bridge to the GPU process.
class Instance final : public nsWrapperCache {
 public:
  GPU_DECL_CYCLE_COLLECTION(Instance)
  GPU_DECL_JS_WRAP(Instance)

  nsIGlobalObject* GetParentObject() const { return mOwner; }

  // Factory; the stored bridge is null when the WebGPU feature is disabled.
  static already_AddRefed<Instance> Create(nsIGlobalObject* aOwner);

  // WebIDL: GPU.requestAdapter().
  already_AddRefed<dom::Promise> RequestAdapter(
      const dom::GPURequestAdapterOptions& aOptions, ErrorResult& aRv);

  // IPDL connection to the GPU process; null if WebGPU is disabled.
  RefPtr<WebGPUChild> mBridge;

 private:
  explicit Instance(nsIGlobalObject* aOwner, WebGPUChild* aBridge);
  virtual ~Instance();
  void Cleanup();

  nsCOMPtr<nsIGlobalObject> mOwner;

 public:
};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_INSTANCE_H_
diff --git a/dom/webgpu/ObjectModel.cpp b/dom/webgpu/ObjectModel.cpp
new file mode 100644
index 0000000000..7792231cd9
--- /dev/null
+++ b/dom/webgpu/ObjectModel.cpp
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ObjectModel.h"
+
+#include "Adapter.h"
+#include "Device.h"
+#include "CommandEncoder.h"
+#include "Instance.h"
+#include "Texture.h"
+
+namespace mozilla {
+namespace webgpu {
+
// Out-of-line definitions for the ChildOf<T> mixin, followed by explicit
// instantiations for every parent type used in the WebGPU object tree.
template <typename T>
ChildOf<T>::ChildOf(T* const parent) : mParent(parent) {}

template <typename T>
ChildOf<T>::~ChildOf() = default;

template <typename T>
nsIGlobalObject* ChildOf<T>::GetParentObject() const {
  // Delegate to the parent's global; assumes mParent is non-null here.
  return mParent->GetParentObject();
}

void ObjectBase::GetLabel(nsAString& aValue) const { aValue = mLabel; }
void ObjectBase::SetLabel(const nsAString& aLabel) { mLabel = aLabel; }

// The template definitions above only link for these parent types.
template class ChildOf<Adapter>;
template class ChildOf<CommandEncoder>;
template class ChildOf<Device>;
template class ChildOf<Instance>;
template class ChildOf<Texture>;
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ObjectModel.h b/dom/webgpu/ObjectModel.h
new file mode 100644
index 0000000000..f56a6b49bc
--- /dev/null
+++ b/dom/webgpu/ObjectModel.h
@@ -0,0 +1,97 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_OBJECT_MODEL_H_
+#define GPU_OBJECT_MODEL_H_
+
+#include "nsWrapperCache.h"
+#include "nsString.h"
+
+class nsIGlobalObject;
+
+namespace mozilla {
+namespace webgpu {
+class WebGPUChild;
+
// Mixin giving a WebGPU object a strong reference to its parent and the
// GetParentObject() accessor required by the wrapper-cache bindings.
template <typename T>
class ChildOf {
 protected:
  explicit ChildOf(T* const parent);
  virtual ~ChildOf();

  RefPtr<T> mParent;  // strong edge; traced by the cycle collector

 public:
  nsIGlobalObject* GetParentObject() const;
};
+
// Common base for WebGPU DOM objects: wrapper cache plus the WebIDL
// GPUObjectBase `label` attribute.
class ObjectBase : public nsWrapperCache {
 private:
  nsString mLabel;  // developer-provided debug label

 protected:
  virtual ~ObjectBase() = default;
  // Internal mutability model for WebGPU objects.
  bool mValid = true;

 public:
  void GetLabel(nsAString& aValue) const;
  void SetLabel(const nsAString& aLabel);
};
+
+} // namespace webgpu
+} // namespace mozilla
+
// Declares the WrapObject override required by the WebIDL bindings.
#define GPU_DECL_JS_WRAP(T)                                             \
  JSObject* WrapObject(JSContext* cx, JS::Handle<JSObject*> givenProto) \
      override;

// Declares script-holder cycle collection plus native refcounting.
#define GPU_DECL_CYCLE_COLLECTION(T)                     \
  NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(T) \
  NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(T)

// Defines WrapObject by dispatching to the generated GPU<T>_Binding.
#define GPU_IMPL_JS_WRAP(T)                                                  \
  JSObject* T::WrapObject(JSContext* cx, JS::Handle<JSObject*> givenProto) { \
    return dom::GPU##T##_Binding::Wrap(cx, this, givenProto);                \
  }

// Note: we don't use `NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE` directly
// because there is a custom action we need to always do.
// (The custom action is the tmp->Cleanup() call on the unlink path.)
#define GPU_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(T, ...) \
  NS_IMPL_CYCLE_COLLECTION_CLASS(T)                    \
  NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(T)             \
  tmp->Cleanup();                                      \
  NS_IMPL_CYCLE_COLLECTION_UNLINK(__VA_ARGS__)         \
  NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER    \
  NS_IMPL_CYCLE_COLLECTION_UNLINK_END                  \
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(T)           \
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(__VA_ARGS__)       \
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END                \
  NS_IMPL_CYCLE_COLLECTION_TRACE_WRAPPERCACHE(T)

// Full cycle-collection boilerplate: native root/unroot plus the above.
#define GPU_IMPL_CYCLE_COLLECTION(T, ...)            \
  NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(T, AddRef)    \
  NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(T, Release) \
  GPU_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(T, __VA_ARGS__)
+
// Cycle-collector helpers so arrays of RefPtr<const T> (the "used objects"
// lists on encoders) can be traversed and unlinked. The const_cast is needed
// because the CC callback API takes a mutable T*.
template <typename T>
void ImplCycleCollectionTraverse(nsCycleCollectionTraversalCallback& callback,
                                 nsTArray<RefPtr<const T>>& field,
                                 const char* name, uint32_t flags) {
  for (auto& element : field) {
    CycleCollectionNoteChild(callback, const_cast<T*>(element.get()), name,
                             flags);
  }
}

template <typename T>
void ImplCycleCollectionUnlink(nsTArray<RefPtr<const T>>& field) {
  for (auto& element : field) {
    ImplCycleCollectionUnlink(element);
  }
  field.Clear();
}
+
+#endif // GPU_OBJECT_MODEL_H_
diff --git a/dom/webgpu/OutOfMemoryError.cpp b/dom/webgpu/OutOfMemoryError.cpp
new file mode 100644
index 0000000000..6b2fd29064
--- /dev/null
+++ b/dom/webgpu/OutOfMemoryError.cpp
@@ -0,0 +1,19 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "OutOfMemoryError.h"
+#include "Device.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla {
+namespace webgpu {
+
// Cycle-collection and WebIDL wrapper boilerplate for GPUOutOfMemoryError.
GPU_IMPL_CYCLE_COLLECTION(OutOfMemoryError, mParent)
GPU_IMPL_JS_WRAP(OutOfMemoryError)

OutOfMemoryError::~OutOfMemoryError() = default;
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/OutOfMemoryError.h b/dom/webgpu/OutOfMemoryError.h
new file mode 100644
index 0000000000..e634e396f1
--- /dev/null
+++ b/dom/webgpu/OutOfMemoryError.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_OutOfMemoryError_H_
+#define GPU_OutOfMemoryError_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace dom {
+class GlobalObject;
+} // namespace dom
+namespace webgpu {
+class Device;
+
// WebIDL GPUOutOfMemoryError: marker error object surfaced when an
// allocation fails; carries no extra state beyond its parent device.
class OutOfMemoryError final : public nsWrapperCache, public ChildOf<Device> {
 public:
  GPU_DECL_CYCLE_COLLECTION(OutOfMemoryError)
  GPU_DECL_JS_WRAP(OutOfMemoryError)
  OutOfMemoryError() = delete;

 private:
  virtual ~OutOfMemoryError();
  void Cleanup() {}  // required by the cycle-collector unlink macro

 public:
};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_OutOfMemoryError_H_
diff --git a/dom/webgpu/PipelineLayout.cpp b/dom/webgpu/PipelineLayout.cpp
new file mode 100644
index 0000000000..2a3d5707b0
--- /dev/null
+++ b/dom/webgpu/PipelineLayout.cpp
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "PipelineLayout.h"
+#include "ipc/WebGPUChild.h"
+
+#include "Device.h"
+
+namespace mozilla {
+namespace webgpu {
+
GPU_IMPL_CYCLE_COLLECTION(PipelineLayout, mParent)
GPU_IMPL_JS_WRAP(PipelineLayout)

PipelineLayout::PipelineLayout(Device* const aParent, RawId aId)
    : ChildOf(aParent), mId(aId) {}

PipelineLayout::~PipelineLayout() { Cleanup(); }

// Idempotent teardown (mValid flips to false on first call): tells the
// parent process to free the wgpu pipeline layout. Reached from both the
// destructor and the cycle-collector unlink path.
void PipelineLayout::Cleanup() {
  if (mValid && mParent) {
    mValid = false;
    auto bridge = mParent->GetBridge();
    if (bridge && bridge->IsOpen()) {
      bridge->SendPipelineLayoutDestroy(mId);
    }
  }
}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/PipelineLayout.h b/dom/webgpu/PipelineLayout.h
new file mode 100644
index 0000000000..10746c32d6
--- /dev/null
+++ b/dom/webgpu/PipelineLayout.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_PipelineLayout_H_
+#define GPU_PipelineLayout_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+
+namespace mozilla {
+namespace webgpu {
+
+class Device;
+
// DOM wrapper for a wgpu pipeline layout; owns the remote object identified
// by mId and releases it in Cleanup().
class PipelineLayout final : public ObjectBase, public ChildOf<Device> {
 public:
  GPU_DECL_CYCLE_COLLECTION(PipelineLayout)
  GPU_DECL_JS_WRAP(PipelineLayout)

  PipelineLayout(Device* const aParent, RawId aId);

  const RawId mId;  // identifier of the wgpu object in the parent process

 private:
  virtual ~PipelineLayout();
  void Cleanup();
};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_PipelineLayout_H_
diff --git a/dom/webgpu/Queue.cpp b/dom/webgpu/Queue.cpp
new file mode 100644
index 0000000000..052b40b49a
--- /dev/null
+++ b/dom/webgpu/Queue.cpp
@@ -0,0 +1,126 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "Queue.h"
+
+#include "CommandBuffer.h"
+#include "CommandEncoder.h"
+#include "ipc/WebGPUChild.h"
+#include "mozilla/ErrorResult.h"
+
+namespace mozilla {
+namespace webgpu {
+
GPU_IMPL_CYCLE_COLLECTION(Queue, mParent, mBridge)
GPU_IMPL_JS_WRAP(Queue)

// GPUQueue implementation: forwards submissions and data uploads over the
// IPDL bridge to the wgpu queue identified by mId.
Queue::Queue(Device* const aParent, WebGPUChild* aBridge, RawId aId)
    : ChildOf(aParent), mBridge(aBridge), mId(aId) {}

Queue::~Queue() { Cleanup(); }
+
+void Queue::Submit(
+ const dom::Sequence<OwningNonNull<CommandBuffer>>& aCommandBuffers) {
+ nsTArray<RawId> list(aCommandBuffers.Length());
+ for (uint32_t i = 0; i < aCommandBuffers.Length(); ++i) {
+ auto idMaybe = aCommandBuffers[i]->Commit();
+ if (idMaybe) {
+ list.AppendElement(*idMaybe);
+ }
+ }
+
+ mBridge->SendQueueSubmit(mId, list);
+}
+
// WebIDL: GPUQueue.writeBuffer(). Validates the requested range against the
// source ArrayBuffer, copies the bytes into a fresh Shmem, and hands it to
// the parent process for the actual GPU-side write.
void Queue::WriteBuffer(const Buffer& aBuffer, uint64_t aBufferOffset,
                        const dom::ArrayBuffer& aData, uint64_t aDataOffset,
                        const dom::Optional<uint64_t>& aSize,
                        ErrorResult& aRv) {
  aData.ComputeState();  // refresh Data()/Length() before reading them
  // Size defaults to "everything after aDataOffset". CheckedInt catches
  // the uint64 -> size_t narrowing and underflow of the subtraction.
  const auto checkedSize =
      aSize.WasPassed() ? CheckedInt<size_t>(aSize.Value())
                        : CheckedInt<size_t>(aData.Length()) - aDataOffset;
  if (!checkedSize.isValid()) {
    aRv.ThrowRangeError("Mapped size is too large");
    return;
  }

  const auto& size = checkedSize.value();
  // NOTE(review): `aDataOffset + size` itself is not overflow-checked here;
  // assumes the uint64 addition cannot wrap for realistic sizes — confirm.
  if (aDataOffset + size > aData.Length()) {
    aRv.ThrowAbortError(nsPrintfCString("Wrong data size %" PRIuPTR, size));
    return;
  }

  ipc::Shmem shmem;
  if (!mBridge->AllocShmem(size, ipc::Shmem::SharedMemory::TYPE_BASIC,
                           &shmem)) {
    aRv.ThrowAbortError(
        nsPrintfCString("Unable to allocate shmem of size %" PRIuPTR, size));
    return;
  }

  // One copy into shared memory; Shmem ownership moves to IPC on send.
  memcpy(shmem.get<uint8_t>(), aData.Data() + aDataOffset, size);
  mBridge->SendQueueWriteBuffer(mId, aBuffer.mId, aBufferOffset,
                                std::move(shmem));
}
+
// WebIDL: GPUQueue.writeTexture(). Converts the WebIDL views/layout/extent
// to their FFI forms, computes the exact byte size of the copy, validates it
// against the source ArrayBuffer, then ships the bytes via Shmem.
void Queue::WriteTexture(const dom::GPUTextureCopyView& aDestination,
                         const dom::ArrayBuffer& aData,
                         const dom::GPUTextureDataLayout& aDataLayout,
                         const dom::GPUExtent3D& aSize, ErrorResult& aRv) {
  ffi::WGPUTextureCopyView copyView = {};
  CommandEncoder::ConvertTextureCopyViewToFFI(aDestination, &copyView);
  ffi::WGPUTextureDataLayout dataLayout = {};
  CommandEncoder::ConvertTextureDataLayoutToFFI(aDataLayout, &dataLayout);
  dataLayout.offset = 0;  // our Shmem has the contents starting from 0.
  ffi::WGPUExtent3d extent = {};
  CommandEncoder::ConvertExtent3DToFFI(aSize, &extent);

  // Bytes-per-block is Nothing for formats we can't size (e.g. unsupported).
  const auto bpb = aDestination.mTexture->mBytesPerBlock;
  if (!bpb) {
    aRv.ThrowAbortError(nsPrintfCString("Invalid texture format"));
    return;
  }
  if (extent.width == 0 || extent.height == 0 || extent.depth == 0) {
    aRv.ThrowAbortError(nsPrintfCString("Invalid copy size"));
    return;
  }

  // TODO: support block-compressed formats
  aData.ComputeState();
  // All rows except the last: (depth-1) full images plus (height-1) rows of
  // the final image. The final row only needs width * bytes-per-block bytes,
  // hence the separate term below.
  const auto fullRows =
      (CheckedInt<size_t>(extent.depth - 1) * aDataLayout.mRowsPerImage +
       extent.height - 1);
  const auto checkedSize = fullRows * aDataLayout.mBytesPerRow +
                           CheckedInt<size_t>(extent.width) * bpb.value();
  if (!checkedSize.isValid()) {
    aRv.ThrowRangeError("Mapped size is too large");
    return;
  }

  const auto& size = checkedSize.value();
  auto availableSize = aData.Length();
  // Together these guarantee [mOffset, mOffset + size) is within the buffer
  // without risking unsigned wrap-around.
  if (availableSize < aDataLayout.mOffset ||
      size > (availableSize - aDataLayout.mOffset)) {
    aRv.ThrowAbortError(nsPrintfCString("Wrong data size %" PRIuPTR, size));
    return;
  }

  ipc::Shmem shmem;
  if (!mBridge->AllocShmem(size, ipc::Shmem::SharedMemory::TYPE_BASIC,
                           &shmem)) {
    aRv.ThrowAbortError(
        nsPrintfCString("Unable to allocate shmem of size %" PRIuPTR, size));
    return;
  }

  memcpy(shmem.get<uint8_t>(), aData.Data() + aDataLayout.mOffset, size);
  mBridge->SendQueueWriteTexture(mId, copyView, std::move(shmem), dataLayout,
                                 extent);
}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/Queue.h b/dom/webgpu/Queue.h
new file mode 100644
index 0000000000..8f420d3651
--- /dev/null
+++ b/dom/webgpu/Queue.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_Queue_H_
+#define GPU_Queue_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/dom/TypedArray.h"
+
+namespace mozilla {
+class ErrorResult;
+namespace dom {
+class RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict;
+template <typename T>
+class Optional;
+template <typename T>
+class Sequence;
+struct TextureCopyView;
+struct TextureDataLayout;
+typedef RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict GPUExtent3D;
+} // namespace dom
+namespace webgpu {
+
+class Buffer;
+class CommandBuffer;
+class Device;
+class Fence;
+
+class Queue final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(Queue)
+ GPU_DECL_JS_WRAP(Queue)
+
+ Queue(Device* const aParent, WebGPUChild* aBridge, RawId aId);
+
+ void Submit(
+ const dom::Sequence<OwningNonNull<CommandBuffer>>& aCommandBuffers);
+
+ void WriteBuffer(const Buffer& aBuffer, uint64_t aBufferOffset,
+ const dom::ArrayBuffer& adata, uint64_t aDataOffset,
+ const dom::Optional<uint64_t>& aSize, ErrorResult& aRv);
+
+ void WriteTexture(const dom::GPUTextureCopyView& aDestination,
+ const dom::ArrayBuffer& aData,
+ const dom::GPUTextureDataLayout& aDataLayout,
+ const dom::GPUExtent3D& aSize, ErrorResult& aRv);
+
+ private:
+ Queue() = delete;
+ virtual ~Queue();
+ void Cleanup() {}
+
+ RefPtr<WebGPUChild> mBridge;
+ const RawId mId;
+
+ public:
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_Queue_H_
diff --git a/dom/webgpu/RenderBundle.cpp b/dom/webgpu/RenderBundle.cpp
new file mode 100644
index 0000000000..5d57c8af2a
--- /dev/null
+++ b/dom/webgpu/RenderBundle.cpp
@@ -0,0 +1,20 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "RenderBundle.h"
+
+#include "Device.h"
+
+namespace mozilla {
+namespace webgpu {
+
RenderBundle::~RenderBundle() = default;

// Cycle-collection and WebIDL wrapper boilerplate for GPURenderBundle.
GPU_IMPL_CYCLE_COLLECTION(RenderBundle, mParent)
GPU_IMPL_JS_WRAP(RenderBundle)
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/RenderBundle.h b/dom/webgpu/RenderBundle.h
new file mode 100644
index 0000000000..cce65623f6
--- /dev/null
+++ b/dom/webgpu/RenderBundle.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_RenderBundle_H_
+#define GPU_RenderBundle_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace webgpu {
+
+class Device;
+
// Skeleton GPURenderBundle object; recording/replay is not implemented yet.
class RenderBundle final : public ObjectBase, public ChildOf<Device> {
 public:
  GPU_DECL_CYCLE_COLLECTION(RenderBundle)
  GPU_DECL_JS_WRAP(RenderBundle)

 private:
  RenderBundle() = delete;  // only created by its parent Device
  virtual ~RenderBundle();
  void Cleanup() {}  // required by the cycle-collector unlink macro
};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_RenderBundle_H_
diff --git a/dom/webgpu/RenderBundleEncoder.cpp b/dom/webgpu/RenderBundleEncoder.cpp
new file mode 100644
index 0000000000..55cc1ebcc2
--- /dev/null
+++ b/dom/webgpu/RenderBundleEncoder.cpp
@@ -0,0 +1,18 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "RenderBundleEncoder.h"
+
+#include "RenderBundle.h"
+
+namespace mozilla {
+namespace webgpu {
+
// Cycle-collection and WebIDL wrapper boilerplate for GPURenderBundleEncoder.
GPU_IMPL_CYCLE_COLLECTION(RenderBundleEncoder, mParent)
GPU_IMPL_JS_WRAP(RenderBundleEncoder)
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/RenderBundleEncoder.h b/dom/webgpu/RenderBundleEncoder.h
new file mode 100644
index 0000000000..fd96ba8bd9
--- /dev/null
+++ b/dom/webgpu/RenderBundleEncoder.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_RenderBundleEncoder_H_
+#define GPU_RenderBundleEncoder_H_
+
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace webgpu {
+
+class Device;
+class RenderBundle;
+
// Skeleton GPURenderBundleEncoder; no recording API is exposed yet.
class RenderBundleEncoder final : public ObjectBase, public ChildOf<Device> {
 public:
  GPU_DECL_CYCLE_COLLECTION(RenderBundleEncoder)
  GPU_DECL_JS_WRAP(RenderBundleEncoder)

  RenderBundleEncoder() = delete;

 private:
  ~RenderBundleEncoder() = default;
  void Cleanup() {}  // required by the cycle-collector unlink macro

 public:
};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_RenderBundleEncoder_H_
diff --git a/dom/webgpu/RenderPassEncoder.cpp b/dom/webgpu/RenderPassEncoder.cpp
new file mode 100644
index 0000000000..3ade65244e
--- /dev/null
+++ b/dom/webgpu/RenderPassEncoder.cpp
@@ -0,0 +1,227 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "RenderPassEncoder.h"
+#include "BindGroup.h"
+#include "CommandEncoder.h"
+#include "RenderPipeline.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla {
+namespace webgpu {
+
GPU_IMPL_CYCLE_COLLECTION(RenderPassEncoder, mParent, mUsedBindGroups,
                          mUsedBuffers, mUsedPipelines, mUsedTextureViews)
GPU_IMPL_JS_WRAP(RenderPassEncoder)

// Scoped<> traits: mPass owns the raw ffi render-pass recorder and destroys
// it via FFI unless it was forget()-ten (see EndPass).
ffi::WGPURenderPass* ScopedFfiRenderTraits::empty() { return nullptr; }

void ScopedFfiRenderTraits::release(ffi::WGPURenderPass* raw) {
  if (raw) {
    ffi::wgpu_render_pass_destroy(raw);
  }
}
+
+ffi::WGPULoadOp ConvertLoadOp(const dom::GPULoadOp& aOp) {
+ switch (aOp) {
+ case dom::GPULoadOp::Load:
+ return ffi::WGPULoadOp_Load;
+ default:
+ MOZ_CRASH("Unexpected load op");
+ }
+}
+
+ffi::WGPUStoreOp ConvertStoreOp(const dom::GPUStoreOp& aOp) {
+ switch (aOp) {
+ case dom::GPUStoreOp::Store:
+ return ffi::WGPUStoreOp_Store;
+ case dom::GPUStoreOp::Clear:
+ return ffi::WGPUStoreOp_Clear;
+ default:
+ MOZ_CRASH("Unexpected load op");
+ }
+}
+
+ffi::WGPUColor ConvertColor(const dom::GPUColorDict& aColor) {
+ ffi::WGPUColor color = {aColor.mR, aColor.mG, aColor.mB, aColor.mA};
+ return color;
+}
+
// Build an ffi render-pass descriptor from the WebIDL one and start
// recording a pass on the given command encoder. The returned recorder is
// owned by the caller (RenderPassEncoder wraps it in a Scoped<>).
// The stack-allocated attachment descriptors are only referenced for the
// duration of the FFI call, which copies what it needs.
ffi::WGPURenderPass* BeginRenderPass(
    RawId aEncoderId, const dom::GPURenderPassDescriptor& aDesc) {
  ffi::WGPURenderPassDescriptor desc = {};

  // Depth/stencil: each aspect's load value is either an explicit load op
  // or an implicit Clear with the given clear value (float for depth,
  // u32 for stencil).
  ffi::WGPUDepthStencilAttachmentDescriptor dsDesc = {};
  if (aDesc.mDepthStencilAttachment.WasPassed()) {
    const auto& dsa = aDesc.mDepthStencilAttachment.Value();
    dsDesc.attachment = dsa.mAttachment->mId;

    if (dsa.mDepthLoadValue.IsFloat()) {
      dsDesc.depth.load_op = ffi::WGPULoadOp_Clear;
      dsDesc.depth.clear_value = dsa.mDepthLoadValue.GetAsFloat();
    }
    if (dsa.mDepthLoadValue.IsGPULoadOp()) {
      dsDesc.depth.load_op =
          ConvertLoadOp(dsa.mDepthLoadValue.GetAsGPULoadOp());
    }
    dsDesc.depth.store_op = ConvertStoreOp(dsa.mDepthStoreOp);

    if (dsa.mStencilLoadValue.IsRangeEnforcedUnsignedLong()) {
      dsDesc.stencil.load_op = ffi::WGPULoadOp_Clear;
      dsDesc.stencil.clear_value =
          dsa.mStencilLoadValue.GetAsRangeEnforcedUnsignedLong();
    }
    if (dsa.mStencilLoadValue.IsGPULoadOp()) {
      dsDesc.stencil.load_op =
          ConvertLoadOp(dsa.mStencilLoadValue.GetAsGPULoadOp());
    }
    dsDesc.stencil.store_op = ConvertStoreOp(dsa.mStencilStoreOp);

    desc.depth_stencil_attachment = &dsDesc;
  }

  // NOTE(review): assumes mColorAttachments.Length() <= WGPUMAX_COLOR_TARGETS;
  // a longer sequence would index past colorDescs — confirm the bindings or
  // validation clamp the attachment count before we get here.
  std::array<ffi::WGPUColorAttachmentDescriptor, WGPUMAX_COLOR_TARGETS>
      colorDescs = {};
  desc.color_attachments = colorDescs.data();
  desc.color_attachments_length = aDesc.mColorAttachments.Length();

  for (size_t i = 0; i < aDesc.mColorAttachments.Length(); ++i) {
    const auto& ca = aDesc.mColorAttachments[i];
    ffi::WGPUColorAttachmentDescriptor& cd = colorDescs[i];
    cd.attachment = ca.mAttachment->mId;
    cd.channel.store_op = ConvertStoreOp(ca.mStoreOp);

    if (ca.mResolveTarget.WasPassed()) {
      cd.resolve_target = ca.mResolveTarget.Value().mId;
    }
    if (ca.mLoadValue.IsGPULoadOp()) {
      cd.channel.load_op = ConvertLoadOp(ca.mLoadValue.GetAsGPULoadOp());
    } else {
      // A color load value (sequence or dict) implies a Clear load op.
      cd.channel.load_op = ffi::WGPULoadOp_Clear;
      if (ca.mLoadValue.IsDoubleSequence()) {
        // Missing trailing components keep their zero-initialized value.
        const auto& seq = ca.mLoadValue.GetAsDoubleSequence();
        if (seq.Length() >= 1) {
          cd.channel.clear_value.r = seq[0];
        }
        if (seq.Length() >= 2) {
          cd.channel.clear_value.g = seq[1];
        }
        if (seq.Length() >= 3) {
          cd.channel.clear_value.b = seq[2];
        }
        if (seq.Length() >= 4) {
          cd.channel.clear_value.a = seq[3];
        }
      }
      if (ca.mLoadValue.IsGPUColorDict()) {
        cd.channel.clear_value =
            ConvertColor(ca.mLoadValue.GetAsGPUColorDict());
      }
    }
  }

  return ffi::wgpu_command_encoder_begin_render_pass(aEncoderId, &desc);
}
+
// Starts recording immediately (BeginRenderPass) and pins every attachment
// view so the underlying textures outlive the recording.
RenderPassEncoder::RenderPassEncoder(CommandEncoder* const aParent,
                                     const dom::GPURenderPassDescriptor& aDesc)
    : ChildOf(aParent), mPass(BeginRenderPass(aParent->mId, aDesc)) {
  for (const auto& at : aDesc.mColorAttachments) {
    mUsedTextureViews.AppendElement(at.mAttachment);
  }
  if (aDesc.mDepthStencilAttachment.WasPassed()) {
    mUsedTextureViews.AppendElement(
        aDesc.mDepthStencilAttachment.Value().mAttachment);
  }
}
+
// mPass (Scoped) destroys the ffi recorder automatically if EndPass never
// consumed it; only the validity flag needs flipping here.
RenderPassEncoder::~RenderPassEncoder() {
  if (mValid) {
    mValid = false;
  }
}
+
// WebIDL: setBindGroup(). Silently ignored once the pass is ended/invalid.
void RenderPassEncoder::SetBindGroup(
    uint32_t aSlot, const BindGroup& aBindGroup,
    const dom::Sequence<uint32_t>& aDynamicOffsets) {
  if (mValid) {
    mUsedBindGroups.AppendElement(&aBindGroup);  // keep alive until submit
    ffi::wgpu_render_pass_set_bind_group(mPass, aSlot, aBindGroup.mId,
                                         aDynamicOffsets.Elements(),
                                         aDynamicOffsets.Length());
  }
}
+
// WebIDL: setPipeline(). Silently ignored once the pass is ended/invalid.
void RenderPassEncoder::SetPipeline(const RenderPipeline& aPipeline) {
  if (mValid) {
    mUsedPipelines.AppendElement(&aPipeline);  // keep alive until submit
    ffi::wgpu_render_pass_set_pipeline(mPass, aPipeline.mId);
  }
}
+
// WebIDL: setIndexBuffer(). Records the index-buffer binding for later draws.
void RenderPassEncoder::SetIndexBuffer(const Buffer& aBuffer, uint64_t aOffset,
                                       uint64_t aSize) {
  if (mValid) {
    mUsedBuffers.AppendElement(&aBuffer);  // keep alive until submit
    ffi::wgpu_render_pass_set_index_buffer(mPass, aBuffer.mId, aOffset, aSize);
  }
}
+
// WebIDL: setVertexBuffer(). Records a vertex-buffer binding for slot aSlot.
void RenderPassEncoder::SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer,
                                        uint64_t aOffset, uint64_t aSize) {
  if (mValid) {
    mUsedBuffers.AppendElement(&aBuffer);  // keep alive until submit
    ffi::wgpu_render_pass_set_vertex_buffer(mPass, aSlot, aBuffer.mId, aOffset,
                                            aSize);
  }
}
+
+void RenderPassEncoder::Draw(uint32_t aVertexCount, uint32_t aInstanceCount,
+ uint32_t aFirstVertex, uint32_t aFirstInstance) {
+ if (mValid) {
+ ffi::wgpu_render_pass_draw(mPass, aVertexCount, aInstanceCount,
+ aFirstVertex, aFirstInstance);
+ }
+}
+
+void RenderPassEncoder::DrawIndexed(uint32_t aIndexCount,
+ uint32_t aInstanceCount,
+ uint32_t aFirstIndex, int32_t aBaseVertex,
+ uint32_t aFirstInstance) {
+ if (mValid) {
+ ffi::wgpu_render_pass_draw_indexed(mPass, aIndexCount, aInstanceCount,
+ aFirstIndex, aBaseVertex,
+ aFirstInstance);
+ }
+}
+
+void RenderPassEncoder::DrawIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset) {
+ if (mValid) {
+ ffi::wgpu_render_pass_draw_indirect(mPass, aIndirectBuffer.mId,
+ aIndirectOffset);
+ }
+}
+
+void RenderPassEncoder::DrawIndexedIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset) {
+ if (mValid) {
+ ffi::wgpu_render_pass_draw_indexed_indirect(mPass, aIndirectBuffer.mId,
+ aIndirectOffset);
+ }
+}
+
// WebIDL: endPass(). Hands the recorded pass to the parent command encoder
// exactly once; the encoder becomes a no-op afterwards.
void RenderPassEncoder::EndPass(ErrorResult& aRv) {
  if (mValid) {
    mValid = false;
    // forget() detaches the recorder from Scoped so it is not destroyed
    // here; EndRenderPass takes over (and eventually frees) it.
    auto* pass = mPass.forget();
    MOZ_ASSERT(pass);
    mParent->EndRenderPass(*pass, aRv);
  }
}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/RenderPassEncoder.h b/dom/webgpu/RenderPassEncoder.h
new file mode 100644
index 0000000000..93e04d0b79
--- /dev/null
+++ b/dom/webgpu/RenderPassEncoder.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_RenderPassEncoder_H_
+#define GPU_RenderPassEncoder_H_
+
+#include "mozilla/Scoped.h"
+#include "mozilla/dom/TypedArray.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace dom {
+class DoubleSequenceOrGPUColorDict;
+template <typename T>
+class Sequence;
+namespace binding_detail {
+template <typename T>
+class AutoSequence;
+} // namespace binding_detail
+} // namespace dom
+namespace webgpu {
+namespace ffi {
+struct WGPURenderPass;
+} // namespace ffi
+
+class CommandEncoder;
+class RenderBundle;
+class RenderPipeline;
+
+struct ScopedFfiRenderTraits {
+ typedef ffi::WGPURenderPass* type;
+ static type empty();
+ static void release(type raw);
+};
+
+class RenderPassEncoder final : public ObjectBase,
+ public ChildOf<CommandEncoder> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(RenderPassEncoder)
+ GPU_DECL_JS_WRAP(RenderPassEncoder)
+
+ RenderPassEncoder(CommandEncoder* const aParent,
+ const dom::GPURenderPassDescriptor& aDesc);
+
+ protected:
+ virtual ~RenderPassEncoder();
+ void Cleanup() {}
+
+ Scoped<ScopedFfiRenderTraits> mPass;
+ // keep all the used objects alive while the pass is recorded
+ nsTArray<RefPtr<const BindGroup>> mUsedBindGroups;
+ nsTArray<RefPtr<const Buffer>> mUsedBuffers;
+ nsTArray<RefPtr<const RenderPipeline>> mUsedPipelines;
+ nsTArray<RefPtr<const TextureView>> mUsedTextureViews;
+
+ public:
+ void SetBindGroup(uint32_t aSlot, const BindGroup& aBindGroup,
+ const dom::Sequence<uint32_t>& aDynamicOffsets);
+ void SetPipeline(const RenderPipeline& aPipeline);
+ void SetIndexBuffer(const Buffer& aBuffer, uint64_t aOffset, uint64_t aSize);
+ void SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer, uint64_t aOffset,
+ uint64_t aSize);
+ void Draw(uint32_t aVertexCount, uint32_t aInstanceCount,
+ uint32_t aFirstVertex, uint32_t aFirstInstance);
+ void DrawIndexed(uint32_t aIndexCount, uint32_t aInstanceCount,
+ uint32_t aFirstIndex, int32_t aBaseVertex,
+ uint32_t aFirstInstance);
+ void DrawIndirect(const Buffer& aIndirectBuffer, uint64_t aIndirectOffset);
+ void DrawIndexedIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset);
+ void EndPass(ErrorResult& aRv);
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_RenderPassEncoder_H_
diff --git a/dom/webgpu/RenderPipeline.cpp b/dom/webgpu/RenderPipeline.cpp
new file mode 100644
index 0000000000..c18baf3c80
--- /dev/null
+++ b/dom/webgpu/RenderPipeline.cpp
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "RenderPipeline.h"
+
+#include "Device.h"
+#include "ipc/WebGPUChild.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(RenderPipeline, mParent)
+GPU_IMPL_JS_WRAP(RenderPipeline)
+
+RenderPipeline::RenderPipeline(Device* const aParent, RawId aId,
+ nsTArray<RawId>&& aImplicitBindGroupLayoutIds)
+ : ChildOf(aParent),
+ mImplicitBindGroupLayoutIds(std::move(aImplicitBindGroupLayoutIds)),
+ mId(aId) {}
+
+RenderPipeline::~RenderPipeline() { Cleanup(); }
+
+void RenderPipeline::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendRenderPipelineDestroy(mId);
+ }
+ }
+}
+
+already_AddRefed<BindGroupLayout> RenderPipeline::GetBindGroupLayout(
+ uint32_t index) const {
+ RefPtr<BindGroupLayout> object =
+ new BindGroupLayout(mParent, mImplicitBindGroupLayoutIds[index]);
+ return object.forget();
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/RenderPipeline.h b/dom/webgpu/RenderPipeline.h
new file mode 100644
index 0000000000..991497c8fa
--- /dev/null
+++ b/dom/webgpu/RenderPipeline.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_RenderPipeline_H_
+#define GPU_RenderPipeline_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsTArray.h"
+
+namespace mozilla {
+namespace webgpu {
+
+class BindGroupLayout;
+class Device;
+
+class RenderPipeline final : public ObjectBase, public ChildOf<Device> {
+ const nsTArray<RawId> mImplicitBindGroupLayoutIds;
+
+ public:
+ GPU_DECL_CYCLE_COLLECTION(RenderPipeline)
+ GPU_DECL_JS_WRAP(RenderPipeline)
+
+ const RawId mId;
+
+ RenderPipeline(Device* const aParent, RawId aId,
+ nsTArray<RawId>&& aImplicitBindGroupLayoutIds);
+ already_AddRefed<BindGroupLayout> GetBindGroupLayout(uint32_t index) const;
+
+ private:
+ virtual ~RenderPipeline();
+ void Cleanup();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_RenderPipeline_H_
diff --git a/dom/webgpu/Sampler.cpp b/dom/webgpu/Sampler.cpp
new file mode 100644
index 0000000000..342aa367ba
--- /dev/null
+++ b/dom/webgpu/Sampler.cpp
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "Sampler.h"
+#include "ipc/WebGPUChild.h"
+
+#include "Device.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(Sampler, mParent)
+GPU_IMPL_JS_WRAP(Sampler)
+
+Sampler::Sampler(Device* const aParent, RawId aId)
+ : ChildOf(aParent), mId(aId) {}
+
+Sampler::~Sampler() { Cleanup(); }
+
+void Sampler::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendSamplerDestroy(mId);
+ }
+ }
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/Sampler.h b/dom/webgpu/Sampler.h
new file mode 100644
index 0000000000..512d5fe247
--- /dev/null
+++ b/dom/webgpu/Sampler.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_SAMPLER_H_
+#define GPU_SAMPLER_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace webgpu {
+
+class Device;
+
+class Sampler final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(Sampler)
+ GPU_DECL_JS_WRAP(Sampler)
+
+ Sampler(Device* const aParent, RawId aId);
+
+ const RawId mId;
+
+ private:
+ virtual ~Sampler();
+ void Cleanup();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_SAMPLER_H_
diff --git a/dom/webgpu/ShaderModule.cpp b/dom/webgpu/ShaderModule.cpp
new file mode 100644
index 0000000000..26aec5aa1a
--- /dev/null
+++ b/dom/webgpu/ShaderModule.cpp
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "ShaderModule.h"
+#include "ipc/WebGPUChild.h"
+
+#include "Device.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(ShaderModule, mParent)
+GPU_IMPL_JS_WRAP(ShaderModule)
+
+ShaderModule::ShaderModule(Device* const aParent, RawId aId)
+ : ChildOf(aParent), mId(aId) {}
+
+ShaderModule::~ShaderModule() { Cleanup(); }
+
+void ShaderModule::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendShaderModuleDestroy(mId);
+ }
+ }
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ShaderModule.h b/dom/webgpu/ShaderModule.h
new file mode 100644
index 0000000000..d64fce322e
--- /dev/null
+++ b/dom/webgpu/ShaderModule.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_ShaderModule_H_
+#define GPU_ShaderModule_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace webgpu {
+
+class Device;
+
+class ShaderModule final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(ShaderModule)
+ GPU_DECL_JS_WRAP(ShaderModule)
+
+ ShaderModule(Device* const aParent, RawId aId);
+
+ const RawId mId;
+
+ private:
+ virtual ~ShaderModule();
+ void Cleanup();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_ShaderModule_H_
diff --git a/dom/webgpu/SwapChain.cpp b/dom/webgpu/SwapChain.cpp
new file mode 100644
index 0000000000..96061689bf
--- /dev/null
+++ b/dom/webgpu/SwapChain.cpp
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "SwapChain.h"
+#include "Texture.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "ipc/WebGPUChild.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(SwapChain, mParent, mTexture)
+GPU_IMPL_JS_WRAP(SwapChain)
+
+SwapChain::SwapChain(const dom::GPUSwapChainDescriptor& aDesc,
+ const dom::GPUExtent3DDict& aExtent3D,
+ wr::ExternalImageId aExternalImageId,
+ gfx::SurfaceFormat aFormat)
+ : ChildOf(aDesc.mDevice),
+ mFormat(aFormat),
+ mTexture(aDesc.mDevice->InitSwapChain(aDesc, aExtent3D, aExternalImageId,
+ aFormat)) {}
+
+SwapChain::~SwapChain() { Cleanup(); }
+
+void SwapChain::Cleanup() {
+ if (mValid) {
+ mValid = false;
+ }
+}
+
+WebGPUChild* SwapChain::GetGpuBridge() const {
+ return mParent ? mParent->GetBridge().get() : nullptr;
+}
+
+void SwapChain::Destroy(wr::ExternalImageId aExternalImageId) {
+ if (mValid && mParent && mParent->GetBridge()) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendSwapChainDestroy(aExternalImageId);
+ }
+ }
+}
+
+RefPtr<Texture> SwapChain::GetCurrentTexture() { return mTexture; }
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/SwapChain.h b/dom/webgpu/SwapChain.h
new file mode 100644
index 0000000000..48b1aab35e
--- /dev/null
+++ b/dom/webgpu/SwapChain.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_SwapChain_H_
+#define GPU_SwapChain_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webrender/WebRenderAPI.h"
+
+namespace mozilla {
+namespace dom {
+struct GPUExtent3DDict;
+struct GPUSwapChainDescriptor;
+} // namespace dom
+namespace webgpu {
+
+class Device;
+class Texture;
+
+class SwapChain final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(SwapChain)
+ GPU_DECL_JS_WRAP(SwapChain)
+
+ SwapChain(const dom::GPUSwapChainDescriptor& aDesc,
+ const dom::GPUExtent3DDict& aExtent3D,
+ wr::ExternalImageId aExternalImageId, gfx::SurfaceFormat aFormat);
+
+ WebGPUChild* GetGpuBridge() const;
+ void Destroy(wr::ExternalImageId aExternalImageId);
+
+ const gfx::SurfaceFormat mFormat;
+
+ private:
+ virtual ~SwapChain();
+ void Cleanup();
+
+ RefPtr<Texture> mTexture;
+
+ public:
+ RefPtr<Texture> GetCurrentTexture();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_SwapChain_H_
diff --git a/dom/webgpu/Texture.cpp b/dom/webgpu/Texture.cpp
new file mode 100644
index 0000000000..6901c783d8
--- /dev/null
+++ b/dom/webgpu/Texture.cpp
@@ -0,0 +1,103 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Texture.h"
+
+#include "ipc/WebGPUChild.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "mozilla/dom/HTMLCanvasElement.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "TextureView.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(Texture, mParent)
+GPU_IMPL_JS_WRAP(Texture)
+
+static Maybe<uint8_t> GetBytesPerBlock(dom::GPUTextureFormat format) {
+ switch (format) {
+ case dom::GPUTextureFormat::R8unorm:
+ case dom::GPUTextureFormat::R8snorm:
+ case dom::GPUTextureFormat::R8uint:
+ case dom::GPUTextureFormat::R8sint:
+ return Some<uint8_t>(1u);
+ case dom::GPUTextureFormat::R16uint:
+ case dom::GPUTextureFormat::R16sint:
+ case dom::GPUTextureFormat::R16float:
+ case dom::GPUTextureFormat::Rg8unorm:
+ case dom::GPUTextureFormat::Rg8snorm:
+ case dom::GPUTextureFormat::Rg8uint:
+ case dom::GPUTextureFormat::Rg8sint:
+ return Some<uint8_t>(2u);
+ case dom::GPUTextureFormat::R32uint:
+ case dom::GPUTextureFormat::R32sint:
+ case dom::GPUTextureFormat::R32float:
+ case dom::GPUTextureFormat::Rg16uint:
+ case dom::GPUTextureFormat::Rg16sint:
+ case dom::GPUTextureFormat::Rg16float:
+ case dom::GPUTextureFormat::Rgba8unorm:
+ case dom::GPUTextureFormat::Rgba8unorm_srgb:
+ case dom::GPUTextureFormat::Rgba8snorm:
+ case dom::GPUTextureFormat::Rgba8uint:
+ case dom::GPUTextureFormat::Rgba8sint:
+ case dom::GPUTextureFormat::Bgra8unorm:
+ case dom::GPUTextureFormat::Bgra8unorm_srgb:
+ case dom::GPUTextureFormat::Rgb10a2unorm:
+ case dom::GPUTextureFormat::Rg11b10float:
+ return Some<uint8_t>(4u);
+ case dom::GPUTextureFormat::Rg32uint:
+ case dom::GPUTextureFormat::Rg32sint:
+ case dom::GPUTextureFormat::Rg32float:
+ case dom::GPUTextureFormat::Rgba16uint:
+ case dom::GPUTextureFormat::Rgba16sint:
+ case dom::GPUTextureFormat::Rgba16float:
+ return Some<uint8_t>(8u);
+ case dom::GPUTextureFormat::Rgba32uint:
+ case dom::GPUTextureFormat::Rgba32sint:
+ case dom::GPUTextureFormat::Rgba32float:
+ return Some<uint8_t>(16u);
+ case dom::GPUTextureFormat::Depth32float:
+ return Some<uint8_t>(4u);
+ case dom::GPUTextureFormat::Depth24plus:
+ case dom::GPUTextureFormat::Depth24plus_stencil8:
+ case dom::GPUTextureFormat::EndGuard_:
+ return Nothing();
+ }
+ return Nothing();
+}
+
+Texture::Texture(Device* const aParent, RawId aId,
+ const dom::GPUTextureDescriptor& aDesc)
+ : ChildOf(aParent),
+ mId(aId),
+ mBytesPerBlock(GetBytesPerBlock(aDesc.mFormat)) {}
+
+Texture::~Texture() { Cleanup(); }
+
+void Texture::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendTextureDestroy(mId);
+ }
+ }
+}
+
+already_AddRefed<TextureView> Texture::CreateView(
+ const dom::GPUTextureViewDescriptor& aDesc) {
+ RawId id = mParent->GetBridge()->TextureCreateView(mId, mParent->mId, aDesc);
+ RefPtr<TextureView> view = new TextureView(this, id);
+ return view.forget();
+}
+
+void Texture::Destroy() {
+ // TODO: we don't have to implement it right now, but it's used by the
+ // examples
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/Texture.h b/dom/webgpu/Texture.h
new file mode 100644
index 0000000000..d4c26f402d
--- /dev/null
+++ b/dom/webgpu/Texture.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_Texture_H_
+#define GPU_Texture_H_
+
+#include "mozilla/WeakPtr.h"
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "mozilla/WeakPtr.h"
+
+namespace mozilla {
+namespace dom {
+struct GPUTextureDescriptor;
+struct GPUTextureViewDescriptor;
+class HTMLCanvasElement;
+} // namespace dom
+
+namespace webgpu {
+namespace ffi {
+struct WGPUTextureViewDescriptor;
+} // namespace ffi
+
+class Device;
+class TextureView;
+
+class Texture final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(Texture)
+ GPU_DECL_JS_WRAP(Texture)
+
+ Texture(Device* const aParent, RawId aId,
+ const dom::GPUTextureDescriptor& aDesc);
+ Device* GetParentDevice() { return mParent; }
+ const RawId mId;
+ const Maybe<uint8_t> mBytesPerBlock;
+
+ WeakPtr<dom::HTMLCanvasElement> mTargetCanvasElement;
+
+ private:
+ virtual ~Texture();
+ void Cleanup();
+
+ public:
+ already_AddRefed<TextureView> CreateView(
+ const dom::GPUTextureViewDescriptor& aDesc);
+ void Destroy();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_Texture_H_
diff --git a/dom/webgpu/TextureView.cpp b/dom/webgpu/TextureView.cpp
new file mode 100644
index 0000000000..da7d14bbe8
--- /dev/null
+++ b/dom/webgpu/TextureView.cpp
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "TextureView.h"
+
+#include "Device.h"
+#include "mozilla/dom/HTMLCanvasElement.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "ipc/WebGPUChild.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(TextureView, mParent)
+GPU_IMPL_JS_WRAP(TextureView)
+
+TextureView::TextureView(Texture* const aParent, RawId aId)
+ : ChildOf(aParent), mId(aId) {}
+
+TextureView::~TextureView() { Cleanup(); }
+
+dom::HTMLCanvasElement* TextureView::GetTargetCanvasElement() const {
+ return mParent->mTargetCanvasElement;
+}
+
+void TextureView::Cleanup() {
+ if (mValid && mParent && mParent->GetParentDevice()) {
+ mValid = false;
+ auto bridge = mParent->GetParentDevice()->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendTextureViewDestroy(mId);
+ }
+ }
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/TextureView.h b/dom/webgpu/TextureView.h
new file mode 100644
index 0000000000..179b7c3c10
--- /dev/null
+++ b/dom/webgpu/TextureView.h
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_TextureView_H_
+#define GPU_TextureView_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+
+namespace mozilla {
+namespace dom {
+class HTMLCanvasElement;
+} // namespace dom
+namespace webgpu {
+
+class Texture;
+
+class TextureView final : public ObjectBase, public ChildOf<Texture> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(TextureView)
+ GPU_DECL_JS_WRAP(TextureView)
+
+ TextureView(Texture* const aParent, RawId aId);
+ dom::HTMLCanvasElement* GetTargetCanvasElement() const;
+
+ const RawId mId;
+
+ private:
+ virtual ~TextureView();
+ void Cleanup();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_TextureView_H_
diff --git a/dom/webgpu/ValidationError.cpp b/dom/webgpu/ValidationError.cpp
new file mode 100644
index 0000000000..287326dab0
--- /dev/null
+++ b/dom/webgpu/ValidationError.cpp
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ValidationError.h"
+#include "Device.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla {
+namespace webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(ValidationError, mParent)
+GPU_IMPL_JS_WRAP(ValidationError)
+
+ValidationError::ValidationError(Device* aParent, const nsACString& aMessage)
+ : ChildOf(aParent), mMessage(aMessage) {}
+
+ValidationError::~ValidationError() = default;
+
+already_AddRefed<ValidationError> ValidationError::Constructor(
+ const dom::GlobalObject& aGlobal, const nsAString& aString) {
+ MOZ_CRASH("TODO");
+}
+
+void ValidationError::GetMessage(nsAString& aMessage) const {
+ CopyUTF8toUTF16(mMessage, aMessage);
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ValidationError.h b/dom/webgpu/ValidationError.h
new file mode 100644
index 0000000000..a46c39a2bf
--- /dev/null
+++ b/dom/webgpu/ValidationError.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_ValidationError_H_
+#define GPU_ValidationError_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+namespace dom {
+class GlobalObject;
+} // namespace dom
+namespace webgpu {
+class Device;
+
+class ValidationError final : public nsWrapperCache, public ChildOf<Device> {
+ nsCString mMessage;
+
+ public:
+ GPU_DECL_CYCLE_COLLECTION(ValidationError)
+ GPU_DECL_JS_WRAP(ValidationError)
+ ValidationError(Device* aParent, const nsACString& aMessage);
+
+ private:
+ virtual ~ValidationError();
+ void Cleanup() {}
+
+ public:
+ static already_AddRefed<ValidationError> Constructor(
+ const dom::GlobalObject& aGlobal, const nsAString& aString);
+ void GetMessage(nsAString& aMessage) const;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_ValidationError_H_
diff --git a/dom/webgpu/ffi/wgpu_ffi_generated.h b/dom/webgpu/ffi/wgpu_ffi_generated.h
new file mode 100644
index 0000000000..80cc69d6ad
--- /dev/null
+++ b/dom/webgpu/ffi/wgpu_ffi_generated.h
@@ -0,0 +1,2967 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Generated with cbindgen:0.15.0 */
+
+/* DO NOT MODIFY THIS MANUALLY! This file was generated using cbindgen.
+ * To generate this file:
+ * 1. Get the latest cbindgen using `cargo install --force cbindgen`
+ * a. Alternatively, you can clone `https://github.com/eqrion/cbindgen` and use a tagged release
+ * 2. Run `rustup run nightly cbindgen toolkit/library/rust/ --lockfile Cargo.lock --crate wgpu_bindings -o dom/webgpu/ffi/wgpu_ffi_generated.h`
+ */
+
+struct WGPUByteBuf;
+typedef uint64_t WGPUNonZeroU64;
+typedef uint64_t WGPUOption_BufferSize;
+typedef uint32_t WGPUOption_NonZeroU32;
+typedef uint8_t WGPUOption_NonZeroU8;
+typedef uint64_t WGPUOption_AdapterId;
+typedef uint64_t WGPUOption_BufferId;
+typedef uint64_t WGPUOption_PipelineLayoutId;
+typedef uint64_t WGPUOption_SamplerId;
+typedef uint64_t WGPUOption_SurfaceId;
+typedef uint64_t WGPUOption_TextureViewId;
+
+
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#define WGPUMAX_BIND_GROUPS 8
+
+#define WGPUMAX_COLOR_TARGETS 4
+
+#define WGPUMAX_MIP_LEVELS 16
+
+#define WGPUMAX_VERTEX_BUFFERS 16
+
+#define WGPUMAX_ANISOTROPY 16
+
+#define WGPUSHADER_STAGE_COUNT 3
+
+#define WGPUDESIRED_NUM_FRAMES 3
+
+/**
+ * Buffer-Texture copies must have [`bytes_per_row`] aligned to this number.
+ *
+ * This doesn't apply to [`Queue::write_texture`].
+ *
+ * [`bytes_per_row`]: TextureDataLayout::bytes_per_row
+ */
+#define WGPUCOPY_BYTES_PER_ROW_ALIGNMENT 256
+
+/**
+ * Alignment all push constants need
+ */
+#define WGPUPUSH_CONSTANT_ALIGNMENT 4
+
+/**
+ * How edges should be handled in texture addressing.
+ */
+enum WGPUAddressMode {
+ /**
+ * Clamp the value to the edge of the texture
+ *
+ * -0.25 -> 0.0
+ * 1.25 -> 1.0
+ */
+ WGPUAddressMode_ClampToEdge = 0,
+ /**
+ * Repeat the texture in a tiling fashion
+ *
+ * -0.25 -> 0.75
+ * 1.25 -> 0.25
+ */
+ WGPUAddressMode_Repeat = 1,
+ /**
+ * Repeat the texture, mirroring it every repeat
+ *
+ * -0.25 -> 0.25
+ * 1.25 -> 0.75
+ */
+ WGPUAddressMode_MirrorRepeat = 2,
+ /**
+ * Clamp the value to the border of the texture
+ * Requires feature [`Features::ADDRESS_MODE_CLAMP_TO_BORDER`]
+ *
+ * -0.25 -> border
+ * 1.25 -> border
+ */
+ WGPUAddressMode_ClampToBorder = 3,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUAddressMode_Sentinel,
+};
+
+/**
+ * Alpha blend factor.
+ *
+ * Alpha blending is very complicated: see the OpenGL or Vulkan spec for more information.
+ */
+enum WGPUBlendFactor {
+ WGPUBlendFactor_Zero = 0,
+ WGPUBlendFactor_One = 1,
+ WGPUBlendFactor_SrcColor = 2,
+ WGPUBlendFactor_OneMinusSrcColor = 3,
+ WGPUBlendFactor_SrcAlpha = 4,
+ WGPUBlendFactor_OneMinusSrcAlpha = 5,
+ WGPUBlendFactor_DstColor = 6,
+ WGPUBlendFactor_OneMinusDstColor = 7,
+ WGPUBlendFactor_DstAlpha = 8,
+ WGPUBlendFactor_OneMinusDstAlpha = 9,
+ WGPUBlendFactor_SrcAlphaSaturated = 10,
+ WGPUBlendFactor_BlendColor = 11,
+ WGPUBlendFactor_OneMinusBlendColor = 12,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUBlendFactor_Sentinel,
+};
+
+/**
+ * Alpha blend operation.
+ *
+ * Alpha blending is very complicated: see the OpenGL or Vulkan spec for more information.
+ */
+enum WGPUBlendOperation {
+ WGPUBlendOperation_Add = 0,
+ WGPUBlendOperation_Subtract = 1,
+ WGPUBlendOperation_ReverseSubtract = 2,
+ WGPUBlendOperation_Min = 3,
+ WGPUBlendOperation_Max = 4,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUBlendOperation_Sentinel,
+};
+
+enum WGPUBufferMapAsyncStatus {
+ WGPUBufferMapAsyncStatus_Success,
+ WGPUBufferMapAsyncStatus_Error,
+ WGPUBufferMapAsyncStatus_Unknown,
+ WGPUBufferMapAsyncStatus_ContextLost,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUBufferMapAsyncStatus_Sentinel,
+};
+
+/**
+ * Comparison function used for depth and stencil operations.
+ */
+enum WGPUCompareFunction {
+ /**
+ * Function never passes
+ */
+ WGPUCompareFunction_Never = 1,
+ /**
+ * Function passes if new value less than existing value
+ */
+ WGPUCompareFunction_Less = 2,
+ /**
+ * Function passes if new value is equal to existing value
+ */
+ WGPUCompareFunction_Equal = 3,
+ /**
+ * Function passes if new value is less than or equal to existing value
+ */
+ WGPUCompareFunction_LessEqual = 4,
+ /**
+ * Function passes if new value is greater than existing value
+ */
+ WGPUCompareFunction_Greater = 5,
+ /**
+ * Function passes if new value is not equal to existing value
+ */
+ WGPUCompareFunction_NotEqual = 6,
+ /**
+ * Function passes if new value is greater than or equal to existing value
+ */
+ WGPUCompareFunction_GreaterEqual = 7,
+ /**
+ * Function always passes
+ */
+ WGPUCompareFunction_Always = 8,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUCompareFunction_Sentinel,
+};
+
+/**
+ * Type of faces to be culled.
+ */
+enum WGPUCullMode {
+ /**
+ * No faces should be culled
+ */
+ WGPUCullMode_None = 0,
+ /**
+ * Front faces should be culled
+ */
+ WGPUCullMode_Front = 1,
+ /**
+ * Back faces should be culled
+ */
+ WGPUCullMode_Back = 2,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUCullMode_Sentinel,
+};
+
+/**
+ * Texel mixing mode when sampling between texels.
+ */
+enum WGPUFilterMode {
+ /**
+ * Nearest neighbor sampling.
+ *
+ * This creates a pixelated effect when used as a mag filter
+ */
+ WGPUFilterMode_Nearest = 0,
+ /**
+ * Linear Interpolation
+ *
+ * This makes textures smooth but blurry when used as a mag filter.
+ */
+ WGPUFilterMode_Linear = 1,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUFilterMode_Sentinel,
+};
+
+/**
+ * Winding order which classifies the "front" face.
+ */
+enum WGPUFrontFace {
+ /**
+ * Triangles with vertices in counter clockwise order are considered the front face.
+ *
+ * This is the default with right handed coordinate spaces.
+ */
+ WGPUFrontFace_Ccw = 0,
+ /**
+ * Triangles with vertices in clockwise order are considered the front face.
+ *
+ * This is the default with left handed coordinate spaces.
+ */
+ WGPUFrontFace_Cw = 1,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUFrontFace_Sentinel,
+};
+
+enum WGPUHostMap {
+ WGPUHostMap_Read,
+ WGPUHostMap_Write,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUHostMap_Sentinel,
+};
+
+/**
+ * Format of indices used with pipeline.
+ */
+enum WGPUIndexFormat {
+ /**
+ * Indices are 16 bit unsigned integers.
+ */
+ WGPUIndexFormat_Uint16 = 0,
+ /**
+ * Indices are 32 bit unsigned integers.
+ */
+ WGPUIndexFormat_Uint32 = 1,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUIndexFormat_Sentinel,
+};
+
+/**
+ * Rate that determines when vertex data is advanced.
+ */
+enum WGPUInputStepMode {
+ /**
+ * Input data is advanced every vertex. This is the standard value for vertex data.
+ */
+ WGPUInputStepMode_Vertex = 0,
+ /**
+ * Input data is advanced every instance.
+ */
+ WGPUInputStepMode_Instance = 1,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUInputStepMode_Sentinel,
+};
+
+/**
+ * Operation to perform to the output attachment at the start of a renderpass.
+ */
+enum WGPULoadOp {
+ /**
+ * Clear the output attachment with the clear color. Clearing is faster than loading.
+ */
+ WGPULoadOp_Clear = 0,
+ /**
+ * Do not clear output attachment.
+ */
+ WGPULoadOp_Load = 1,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPULoadOp_Sentinel,
+};
+
+/**
+ * Type of drawing mode for polygons
+ */
+enum WGPUPolygonMode {
+ /**
+ * Polygons are filled
+ */
+ WGPUPolygonMode_Fill = 0,
+ /**
+   * Polygons are drawn as line segments
+ */
+ WGPUPolygonMode_Line = 1,
+ /**
+   * Polygons are drawn as points
+ */
+ WGPUPolygonMode_Point = 2,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUPolygonMode_Sentinel,
+};
+
+/**
+ * Power Preference when choosing a physical adapter.
+ */
+enum WGPUPowerPreference {
+ /**
+   * Adapter that uses the least possible power. This is often an integrated GPU.
+ */
+ WGPUPowerPreference_LowPower = 0,
+ /**
+ * Adapter that has the highest performance. This is often a discrete GPU.
+ */
+ WGPUPowerPreference_HighPerformance = 1,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUPowerPreference_Sentinel,
+};
+
+/**
+ * Primitive type the input mesh is composed of.
+ */
+enum WGPUPrimitiveTopology {
+ /**
+ * Vertex data is a list of points. Each vertex is a new point.
+ */
+ WGPUPrimitiveTopology_PointList = 0,
+ /**
+ * Vertex data is a list of lines. Each pair of vertices composes a new line.
+ *
+ * Vertices `0 1 2 3` create two lines `0 1` and `2 3`
+ */
+ WGPUPrimitiveTopology_LineList = 1,
+ /**
+ * Vertex data is a strip of lines. Each set of two adjacent vertices form a line.
+ *
+ * Vertices `0 1 2 3` create three lines `0 1`, `1 2`, and `2 3`.
+ */
+ WGPUPrimitiveTopology_LineStrip = 2,
+ /**
+ * Vertex data is a list of triangles. Each set of 3 vertices composes a new triangle.
+ *
+ * Vertices `0 1 2 3 4 5` create two triangles `0 1 2` and `3 4 5`
+ */
+ WGPUPrimitiveTopology_TriangleList = 3,
+ /**
+ * Vertex data is a triangle strip. Each set of three adjacent vertices form a triangle.
+ *
+ * Vertices `0 1 2 3 4 5` create four triangles `0 1 2`, `2 1 3`, `3 2 4`, and `4 3 5`
+ */
+ WGPUPrimitiveTopology_TriangleStrip = 4,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUPrimitiveTopology_Sentinel,
+};
+
+enum WGPURawBindingType {
+ WGPURawBindingType_UniformBuffer,
+ WGPURawBindingType_StorageBuffer,
+ WGPURawBindingType_ReadonlyStorageBuffer,
+ WGPURawBindingType_Sampler,
+ WGPURawBindingType_ComparisonSampler,
+ WGPURawBindingType_SampledTexture,
+ WGPURawBindingType_ReadonlyStorageTexture,
+ WGPURawBindingType_WriteonlyStorageTexture,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPURawBindingType_Sentinel,
+};
+
+/**
+ * Operation to perform on the stencil value.
+ */
+enum WGPUStencilOperation {
+ /**
+ * Keep stencil value unchanged.
+ */
+ WGPUStencilOperation_Keep = 0,
+ /**
+ * Set stencil value to zero.
+ */
+ WGPUStencilOperation_Zero = 1,
+ /**
+ * Replace stencil value with value provided in most recent call to [`RenderPass::set_stencil_reference`].
+ */
+ WGPUStencilOperation_Replace = 2,
+ /**
+ * Bitwise inverts stencil value.
+ */
+ WGPUStencilOperation_Invert = 3,
+ /**
+ * Increments stencil value by one, clamping on overflow.
+ */
+ WGPUStencilOperation_IncrementClamp = 4,
+ /**
+ * Decrements stencil value by one, clamping on underflow.
+ */
+ WGPUStencilOperation_DecrementClamp = 5,
+ /**
+ * Increments stencil value by one, wrapping on overflow.
+ */
+ WGPUStencilOperation_IncrementWrap = 6,
+ /**
+ * Decrements stencil value by one, wrapping on underflow.
+ */
+ WGPUStencilOperation_DecrementWrap = 7,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUStencilOperation_Sentinel,
+};
+
+/**
+ * Operation to perform to the output attachment at the end of a renderpass.
+ */
+enum WGPUStoreOp {
+ /**
+ * Clear the render target. If you don't care about the contents of the target, this can be faster.
+ */
+ WGPUStoreOp_Clear = 0,
+ /**
+ * Store the result of the renderpass.
+ */
+ WGPUStoreOp_Store = 1,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUStoreOp_Sentinel,
+};
+
+/**
+ * Kind of data the texture holds.
+ */
+enum WGPUTextureAspect {
+ /**
+ * Depth, Stencil, and Color.
+ */
+ WGPUTextureAspect_All,
+ /**
+ * Stencil.
+ */
+ WGPUTextureAspect_StencilOnly,
+ /**
+ * Depth.
+ */
+ WGPUTextureAspect_DepthOnly,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUTextureAspect_Sentinel,
+};
+
+/**
+ * Dimensionality of a texture.
+ */
+enum WGPUTextureDimension {
+ /**
+ * 1D texture
+ */
+ WGPUTextureDimension_D1,
+ /**
+ * 2D texture
+ */
+ WGPUTextureDimension_D2,
+ /**
+ * 3D texture
+ */
+ WGPUTextureDimension_D3,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUTextureDimension_Sentinel,
+};
+
+/**
+ * Underlying texture data format.
+ *
+ * If there is a conversion in the format (such as srgb -> linear), the conversion listed is for
+ * loading from texture in a shader. When writing to the texture, the opposite conversion takes place.
+ */
+enum WGPUTextureFormat {
+ /**
+ * Red channel only. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
+ */
+ WGPUTextureFormat_R8Unorm = 0,
+ /**
+ * Red channel only. 8 bit integer per channel. [-127, 127] converted to/from float [-1, 1] in shader.
+ */
+ WGPUTextureFormat_R8Snorm = 1,
+ /**
+ * Red channel only. 8 bit integer per channel. Unsigned in shader.
+ */
+ WGPUTextureFormat_R8Uint = 2,
+ /**
+ * Red channel only. 8 bit integer per channel. Signed in shader.
+ */
+ WGPUTextureFormat_R8Sint = 3,
+ /**
+ * Red channel only. 16 bit integer per channel. Unsigned in shader.
+ */
+ WGPUTextureFormat_R16Uint = 4,
+ /**
+ * Red channel only. 16 bit integer per channel. Signed in shader.
+ */
+ WGPUTextureFormat_R16Sint = 5,
+ /**
+ * Red channel only. 16 bit float per channel. Float in shader.
+ */
+ WGPUTextureFormat_R16Float = 6,
+ /**
+ * Red and green channels. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
+ */
+ WGPUTextureFormat_Rg8Unorm = 7,
+ /**
+ * Red and green channels. 8 bit integer per channel. [-127, 127] converted to/from float [-1, 1] in shader.
+ */
+ WGPUTextureFormat_Rg8Snorm = 8,
+ /**
+ * Red and green channels. 8 bit integer per channel. Unsigned in shader.
+ */
+ WGPUTextureFormat_Rg8Uint = 9,
+ /**
+ * Red and green channels. 8 bit integer per channel. Signed in shader.
+ */
+ WGPUTextureFormat_Rg8Sint = 10,
+ /**
+ * Red channel only. 32 bit integer per channel. Unsigned in shader.
+ */
+ WGPUTextureFormat_R32Uint = 11,
+ /**
+ * Red channel only. 32 bit integer per channel. Signed in shader.
+ */
+ WGPUTextureFormat_R32Sint = 12,
+ /**
+ * Red channel only. 32 bit float per channel. Float in shader.
+ */
+ WGPUTextureFormat_R32Float = 13,
+ /**
+ * Red and green channels. 16 bit integer per channel. Unsigned in shader.
+ */
+ WGPUTextureFormat_Rg16Uint = 14,
+ /**
+ * Red and green channels. 16 bit integer per channel. Signed in shader.
+ */
+ WGPUTextureFormat_Rg16Sint = 15,
+ /**
+ * Red and green channels. 16 bit float per channel. Float in shader.
+ */
+ WGPUTextureFormat_Rg16Float = 16,
+ /**
+ * Red, green, blue, and alpha channels. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
+ */
+ WGPUTextureFormat_Rgba8Unorm = 17,
+ /**
+ * Red, green, blue, and alpha channels. 8 bit integer per channel. Srgb-color [0, 255] converted to/from linear-color float [0, 1] in shader.
+ */
+ WGPUTextureFormat_Rgba8UnormSrgb = 18,
+ /**
+ * Red, green, blue, and alpha channels. 8 bit integer per channel. [-127, 127] converted to/from float [-1, 1] in shader.
+ */
+ WGPUTextureFormat_Rgba8Snorm = 19,
+ /**
+ * Red, green, blue, and alpha channels. 8 bit integer per channel. Unsigned in shader.
+ */
+ WGPUTextureFormat_Rgba8Uint = 20,
+ /**
+ * Red, green, blue, and alpha channels. 8 bit integer per channel. Signed in shader.
+ */
+ WGPUTextureFormat_Rgba8Sint = 21,
+ /**
+ * Blue, green, red, and alpha channels. 8 bit integer per channel. [0, 255] converted to/from float [0, 1] in shader.
+ */
+ WGPUTextureFormat_Bgra8Unorm = 22,
+ /**
+ * Blue, green, red, and alpha channels. 8 bit integer per channel. Srgb-color [0, 255] converted to/from linear-color float [0, 1] in shader.
+ */
+ WGPUTextureFormat_Bgra8UnormSrgb = 23,
+ /**
+ * Red, green, blue, and alpha channels. 10 bit integer for RGB channels, 2 bit integer for alpha channel. [0, 1023] ([0, 3] for alpha) converted to/from float [0, 1] in shader.
+ */
+ WGPUTextureFormat_Rgb10a2Unorm = 24,
+ /**
+ * Red, green, and blue channels. 11 bit float with no sign bit for RG channels. 10 bit float with no sign bit for blue channel. Float in shader.
+ */
+ WGPUTextureFormat_Rg11b10Float = 25,
+ /**
+ * Red and green channels. 32 bit integer per channel. Unsigned in shader.
+ */
+ WGPUTextureFormat_Rg32Uint = 26,
+ /**
+ * Red and green channels. 32 bit integer per channel. Signed in shader.
+ */
+ WGPUTextureFormat_Rg32Sint = 27,
+ /**
+ * Red and green channels. 32 bit float per channel. Float in shader.
+ */
+ WGPUTextureFormat_Rg32Float = 28,
+ /**
+ * Red, green, blue, and alpha channels. 16 bit integer per channel. Unsigned in shader.
+ */
+ WGPUTextureFormat_Rgba16Uint = 29,
+ /**
+ * Red, green, blue, and alpha channels. 16 bit integer per channel. Signed in shader.
+ */
+ WGPUTextureFormat_Rgba16Sint = 30,
+ /**
+ * Red, green, blue, and alpha channels. 16 bit float per channel. Float in shader.
+ */
+ WGPUTextureFormat_Rgba16Float = 31,
+ /**
+ * Red, green, blue, and alpha channels. 32 bit integer per channel. Unsigned in shader.
+ */
+ WGPUTextureFormat_Rgba32Uint = 32,
+ /**
+ * Red, green, blue, and alpha channels. 32 bit integer per channel. Signed in shader.
+ */
+ WGPUTextureFormat_Rgba32Sint = 33,
+ /**
+ * Red, green, blue, and alpha channels. 32 bit float per channel. Float in shader.
+ */
+ WGPUTextureFormat_Rgba32Float = 34,
+ /**
+ * Special depth format with 32 bit floating point depth.
+ */
+ WGPUTextureFormat_Depth32Float = 35,
+ /**
+ * Special depth format with at least 24 bit integer depth.
+ */
+ WGPUTextureFormat_Depth24Plus = 36,
+ /**
+ * Special depth/stencil format with at least 24 bit integer depth and 8 bits integer stencil.
+ */
+ WGPUTextureFormat_Depth24PlusStencil8 = 37,
+ /**
+ * 4x4 block compressed texture. 8 bytes per block (4 bit/px). 4 color + alpha palette. 5 bit R + 6 bit G + 5 bit B + 1 bit alpha.
+ * [0, 64] ([0, 1] for alpha) converted to/from float [0, 1] in shader.
+ *
+ * Also known as DXT1.
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc1RgbaUnorm = 38,
+ /**
+ * 4x4 block compressed texture. 8 bytes per block (4 bit/px). 4 color + alpha palette. 5 bit R + 6 bit G + 5 bit B + 1 bit alpha.
+ * Srgb-color [0, 64] ([0, 16] for alpha) converted to/from linear-color float [0, 1] in shader.
+ *
+ * Also known as DXT1.
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc1RgbaUnormSrgb = 39,
+ /**
+ * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette. 5 bit R + 6 bit G + 5 bit B + 4 bit alpha.
+ * [0, 64] ([0, 16] for alpha) converted to/from float [0, 1] in shader.
+ *
+ * Also known as DXT3.
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc2RgbaUnorm = 40,
+ /**
+ * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette. 5 bit R + 6 bit G + 5 bit B + 4 bit alpha.
+ * Srgb-color [0, 64] ([0, 256] for alpha) converted to/from linear-color float [0, 1] in shader.
+ *
+ * Also known as DXT3.
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc2RgbaUnormSrgb = 41,
+ /**
+ * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette + 8 alpha palette. 5 bit R + 6 bit G + 5 bit B + 8 bit alpha.
+ * [0, 64] ([0, 256] for alpha) converted to/from float [0, 1] in shader.
+ *
+ * Also known as DXT5.
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc3RgbaUnorm = 42,
+ /**
+ * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 4 color palette + 8 alpha palette. 5 bit R + 6 bit G + 5 bit B + 8 bit alpha.
+ * Srgb-color [0, 64] ([0, 256] for alpha) converted to/from linear-color float [0, 1] in shader.
+ *
+ * Also known as DXT5.
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc3RgbaUnormSrgb = 43,
+ /**
+ * 4x4 block compressed texture. 8 bytes per block (4 bit/px). 8 color palette. 8 bit R.
+ * [0, 256] converted to/from float [0, 1] in shader.
+ *
+ * Also known as RGTC1.
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc4RUnorm = 44,
+ /**
+ * 4x4 block compressed texture. 8 bytes per block (4 bit/px). 8 color palette. 8 bit R.
+ * [-127, 127] converted to/from float [-1, 1] in shader.
+ *
+ * Also known as RGTC1.
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc4RSnorm = 45,
+ /**
+ * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 8 color red palette + 8 color green palette. 8 bit RG.
+ * [0, 256] converted to/from float [0, 1] in shader.
+ *
+ * Also known as RGTC2.
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc5RgUnorm = 46,
+ /**
+ * 4x4 block compressed texture. 16 bytes per block (8 bit/px). 8 color red palette + 8 color green palette. 8 bit RG.
+ * [-127, 127] converted to/from float [-1, 1] in shader.
+ *
+ * Also known as RGTC2.
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc5RgSnorm = 47,
+ /**
+ * 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable sized palette. 16 bit unsigned float RGB. Float in shader.
+ *
+ * Also known as BPTC (float).
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc6hRgbUfloat = 48,
+ /**
+ * 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable sized palette. 16 bit signed float RGB. Float in shader.
+ *
+ * Also known as BPTC (float).
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc6hRgbSfloat = 49,
+ /**
+ * 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable sized palette. 8 bit integer RGBA.
+ * [0, 256] converted to/from float [0, 1] in shader.
+ *
+ * Also known as BPTC (unorm).
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc7RgbaUnorm = 50,
+ /**
+ * 4x4 block compressed texture. 16 bytes per block (8 bit/px). Variable sized palette. 8 bit integer RGBA.
+ * Srgb-color [0, 255] converted to/from linear-color float [0, 1] in shader.
+ *
+ * Also known as BPTC (unorm).
+ *
+ * [`Features::TEXTURE_COMPRESSION_BC`] must be enabled to use this texture format.
+ */
+ WGPUTextureFormat_Bc7RgbaUnormSrgb = 51,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUTextureFormat_Sentinel,
+};
+
+/**
+ * Dimensions of a particular texture view.
+ */
+enum WGPUTextureViewDimension {
+ /**
+ * A one dimensional texture. `texture1D` in glsl shaders.
+ */
+ WGPUTextureViewDimension_D1,
+ /**
+ * A two dimensional texture. `texture2D` in glsl shaders.
+ */
+ WGPUTextureViewDimension_D2,
+ /**
+ * A two dimensional array texture. `texture2DArray` in glsl shaders.
+ */
+ WGPUTextureViewDimension_D2Array,
+ /**
+ * A cubemap texture. `textureCube` in glsl shaders.
+ */
+ WGPUTextureViewDimension_Cube,
+ /**
+ * A cubemap array texture. `textureCubeArray` in glsl shaders.
+ */
+ WGPUTextureViewDimension_CubeArray,
+ /**
+ * A three dimensional texture. `texture3D` in glsl shaders.
+ */
+ WGPUTextureViewDimension_D3,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUTextureViewDimension_Sentinel,
+};
+
+/**
+ * Vertex Format for a Vertex Attribute (input).
+ */
+enum WGPUVertexFormat {
+ /**
+ * Two unsigned bytes (u8). `uvec2` in shaders.
+ */
+ WGPUVertexFormat_Uchar2 = 0,
+ /**
+ * Four unsigned bytes (u8). `uvec4` in shaders.
+ */
+ WGPUVertexFormat_Uchar4 = 1,
+ /**
+ * Two signed bytes (i8). `ivec2` in shaders.
+ */
+ WGPUVertexFormat_Char2 = 2,
+ /**
+ * Four signed bytes (i8). `ivec4` in shaders.
+ */
+ WGPUVertexFormat_Char4 = 3,
+ /**
+ * Two unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec2` in shaders.
+ */
+ WGPUVertexFormat_Uchar2Norm = 4,
+ /**
+ * Four unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec4` in shaders.
+ */
+ WGPUVertexFormat_Uchar4Norm = 5,
+ /**
+ * Two signed bytes (i8). [-127, 127] converted to float [-1, 1] `vec2` in shaders.
+ */
+ WGPUVertexFormat_Char2Norm = 6,
+ /**
+ * Four signed bytes (i8). [-127, 127] converted to float [-1, 1] `vec4` in shaders.
+ */
+ WGPUVertexFormat_Char4Norm = 7,
+ /**
+ * Two unsigned shorts (u16). `uvec2` in shaders.
+ */
+ WGPUVertexFormat_Ushort2 = 8,
+ /**
+ * Four unsigned shorts (u16). `uvec4` in shaders.
+ */
+ WGPUVertexFormat_Ushort4 = 9,
+ /**
+ * Two signed shorts (i16). `ivec2` in shaders.
+ */
+ WGPUVertexFormat_Short2 = 10,
+ /**
+ * Four signed shorts (i16). `ivec4` in shaders.
+ */
+ WGPUVertexFormat_Short4 = 11,
+ /**
+ * Two unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec2` in shaders.
+ */
+ WGPUVertexFormat_Ushort2Norm = 12,
+ /**
+ * Four unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec4` in shaders.
+ */
+ WGPUVertexFormat_Ushort4Norm = 13,
+ /**
+ * Two signed shorts (i16). [-32767, 32767] converted to float [-1, 1] `vec2` in shaders.
+ */
+ WGPUVertexFormat_Short2Norm = 14,
+ /**
+ * Four signed shorts (i16). [-32767, 32767] converted to float [-1, 1] `vec4` in shaders.
+ */
+ WGPUVertexFormat_Short4Norm = 15,
+ /**
+ * Two half-precision floats (no Rust equiv). `vec2` in shaders.
+ */
+ WGPUVertexFormat_Half2 = 16,
+ /**
+ * Four half-precision floats (no Rust equiv). `vec4` in shaders.
+ */
+ WGPUVertexFormat_Half4 = 17,
+ /**
+ * One single-precision float (f32). `float` in shaders.
+ */
+ WGPUVertexFormat_Float = 18,
+ /**
+ * Two single-precision floats (f32). `vec2` in shaders.
+ */
+ WGPUVertexFormat_Float2 = 19,
+ /**
+ * Three single-precision floats (f32). `vec3` in shaders.
+ */
+ WGPUVertexFormat_Float3 = 20,
+ /**
+ * Four single-precision floats (f32). `vec4` in shaders.
+ */
+ WGPUVertexFormat_Float4 = 21,
+ /**
+ * One unsigned int (u32). `uint` in shaders.
+ */
+ WGPUVertexFormat_Uint = 22,
+ /**
+ * Two unsigned ints (u32). `uvec2` in shaders.
+ */
+ WGPUVertexFormat_Uint2 = 23,
+ /**
+ * Three unsigned ints (u32). `uvec3` in shaders.
+ */
+ WGPUVertexFormat_Uint3 = 24,
+ /**
+ * Four unsigned ints (u32). `uvec4` in shaders.
+ */
+ WGPUVertexFormat_Uint4 = 25,
+ /**
+ * One signed int (i32). `int` in shaders.
+ */
+ WGPUVertexFormat_Int = 26,
+ /**
+ * Two signed ints (i32). `ivec2` in shaders.
+ */
+ WGPUVertexFormat_Int2 = 27,
+ /**
+ * Three signed ints (i32). `ivec3` in shaders.
+ */
+ WGPUVertexFormat_Int3 = 28,
+ /**
+ * Four signed ints (i32). `ivec4` in shaders.
+ */
+ WGPUVertexFormat_Int4 = 29,
+ /**
+ * Must be last for serialization purposes
+ */
+ WGPUVertexFormat_Sentinel,
+};
+
+/**
+ * The internal enum mirrored from `BufferUsage`. The values don't have to match!
+ */
+struct WGPUBufferUse;
+
+struct WGPUClient;
+
+struct WGPUComputePass;
+
+struct WGPUGlobal;
+
+/**
+ * Describes a pipeline layout.
+ *
+ * A `PipelineLayoutDescriptor` can be used to create a pipeline layout.
+ */
+struct WGPUPipelineLayoutDescriptor;
+
+struct WGPURenderBundleEncoder;
+
+struct WGPURenderPass;
+
+/**
+ * The internal enum mirrored from `TextureUsage`. The values don't have to match!
+ */
+struct WGPUTextureUse;
+
+struct WGPUInfrastructure {
+ struct WGPUClient *client;
+ const uint8_t *error;
+};
+
+typedef WGPUNonZeroU64 WGPUId_Adapter_Dummy;
+
+typedef WGPUId_Adapter_Dummy WGPUAdapterId;
+
+typedef WGPUNonZeroU64 WGPUId_Device_Dummy;
+
+typedef WGPUId_Device_Dummy WGPUDeviceId;
+
+typedef WGPUNonZeroU64 WGPUId_Buffer_Dummy;
+
+typedef WGPUId_Buffer_Dummy WGPUBufferId;
+
+typedef const char *WGPURawString;
+
+/**
+ * Integral type used for buffer offsets.
+ */
+typedef uint64_t WGPUBufferAddress;
+
+/**
+ * Different ways that you can use a buffer.
+ *
+ * The usages determine what kind of memory the buffer is allocated from and what
+ * actions the buffer can partake in.
+ */
+typedef uint32_t WGPUBufferUsage;
+/**
+ * Allow a buffer to be mapped for reading using [`Buffer::map_async`] + [`Buffer::get_mapped_range`].
+ * This does not include creating a buffer with [`BufferDescriptor::mapped_at_creation`] set.
+ *
+ * If [`Features::MAPPABLE_PRIMARY_BUFFERS`] isn't enabled, the only other usage a buffer
+ * may have is COPY_DST.
+ */
+#define WGPUBufferUsage_MAP_READ (uint32_t)1
+/**
+ * Allow a buffer to be mapped for writing using [`Buffer::map_async`] + [`Buffer::get_mapped_range_mut`].
+ * This does not include creating a buffer with `mapped_at_creation` set.
+ *
+ * If [`Features::MAPPABLE_PRIMARY_BUFFERS`] feature isn't enabled, the only other usage a buffer
+ * may have is COPY_SRC.
+ */
+#define WGPUBufferUsage_MAP_WRITE (uint32_t)2
+/**
+ * Allow a buffer to be the source buffer for a [`CommandEncoder::copy_buffer_to_buffer`] or [`CommandEncoder::copy_buffer_to_texture`]
+ * operation.
+ */
+#define WGPUBufferUsage_COPY_SRC (uint32_t)4
+/**
+ * Allow a buffer to be the destination buffer for a [`CommandEncoder::copy_buffer_to_buffer`], [`CommandEncoder::copy_texture_to_buffer`],
+ * or [`Queue::write_buffer`] operation.
+ */
+#define WGPUBufferUsage_COPY_DST (uint32_t)8
+/**
+ * Allow a buffer to be the index buffer in a draw operation.
+ */
+#define WGPUBufferUsage_INDEX (uint32_t)16
+/**
+ * Allow a buffer to be the vertex buffer in a draw operation.
+ */
+#define WGPUBufferUsage_VERTEX (uint32_t)32
+/**
+ * Allow a buffer to be a [`BindingType::UniformBuffer`] inside a bind group.
+ */
+#define WGPUBufferUsage_UNIFORM (uint32_t)64
+/**
+ * Allow a buffer to be a [`BindingType::StorageBuffer`] inside a bind group.
+ */
+#define WGPUBufferUsage_STORAGE (uint32_t)128
+/**
+ * Allow a buffer to be the indirect buffer in an indirect draw call.
+ */
+#define WGPUBufferUsage_INDIRECT (uint32_t)256
+
+/**
+ * Describes a [`Buffer`].
+ */
+struct WGPUBufferDescriptor {
+ /**
+ * Debug label of a buffer. This will show up in graphics debuggers for easy identification.
+ */
+ WGPURawString label;
+ /**
+ * Size of a buffer.
+ */
+ WGPUBufferAddress size;
+ /**
+ * Usages of a buffer. If the buffer is used in any way that isn't specified here, the operation
+ * will panic.
+ */
+ WGPUBufferUsage usage;
+ /**
+ * Allows a buffer to be mapped immediately after they are made. It does not have to be [`BufferUsage::MAP_READ`] or
+ * [`BufferUsage::MAP_WRITE`], all buffers are allowed to be mapped at creation.
+ */
+ bool mapped_at_creation;
+};
+
+typedef WGPUNonZeroU64 WGPUId_Texture_Dummy;
+
+typedef WGPUId_Texture_Dummy WGPUTextureId;
+
+/**
+ * Extent of a texture related operation.
+ */
+struct WGPUExtent3d {
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+};
+
+/**
+ * Different ways that you can use a texture.
+ *
+ * The usages determine what kind of memory the texture is allocated from and what
+ * actions the texture can partake in.
+ */
+typedef uint32_t WGPUTextureUsage;
+/**
+ * Allows a texture to be the source in a [`CommandEncoder::copy_texture_to_buffer`] or
+ * [`CommandEncoder::copy_texture_to_texture`] operation.
+ */
+#define WGPUTextureUsage_COPY_SRC (uint32_t)1
+/**
+ * Allows a texture to be the destination in a [`CommandEncoder::copy_texture_to_buffer`],
+ * [`CommandEncoder::copy_texture_to_texture`], or [`Queue::write_texture`] operation.
+ */
+#define WGPUTextureUsage_COPY_DST (uint32_t)2
+/**
+ * Allows a texture to be a [`BindingType::SampledTexture`] in a bind group.
+ */
+#define WGPUTextureUsage_SAMPLED (uint32_t)4
+/**
+ * Allows a texture to be a [`BindingType::StorageTexture`] in a bind group.
+ */
+#define WGPUTextureUsage_STORAGE (uint32_t)8
+/**
+ * Allows a texture to be an output attachment of a renderpass.
+ */
+#define WGPUTextureUsage_OUTPUT_ATTACHMENT (uint32_t)16
+
+/**
+ * Describes a [`Texture`].
+ */
+struct WGPUTextureDescriptor {
+ /**
+ * Debug label of the texture. This will show up in graphics debuggers for easy identification.
+ */
+ WGPURawString label;
+ /**
+ * Size of the texture. For a regular 1D/2D texture, the unused sizes will be 1. For 2DArray textures, Z is the
+ * number of 2D textures in that array.
+ */
+ struct WGPUExtent3d size;
+ /**
+ * Mip count of texture. For a texture with no extra mips, this must be 1.
+ */
+ uint32_t mip_level_count;
+ /**
+ * Sample count of texture. If this is not 1, texture must have [`BindingType::SampledTexture::multisampled`] set to true.
+ */
+ uint32_t sample_count;
+ /**
+ * Dimensions of the texture.
+ */
+ enum WGPUTextureDimension dimension;
+ /**
+ * Format of the texture.
+ */
+ enum WGPUTextureFormat format;
+ /**
+ * Allowed usages of the texture. If used in other ways, the operation will panic.
+ */
+ WGPUTextureUsage usage;
+};
+
+typedef WGPUNonZeroU64 WGPUId_TextureView_Dummy;
+
+typedef WGPUId_TextureView_Dummy WGPUTextureViewId;
+
+struct WGPUTextureViewDescriptor {
+ WGPURawString label;
+ const enum WGPUTextureFormat *format;
+ const enum WGPUTextureViewDimension *dimension;
+ enum WGPUTextureAspect aspect;
+ uint32_t base_mip_level;
+ WGPUOption_NonZeroU32 level_count;
+ uint32_t base_array_layer;
+ WGPUOption_NonZeroU32 array_layer_count;
+};
+
+typedef WGPUNonZeroU64 WGPUId_Sampler_Dummy;
+
+typedef WGPUId_Sampler_Dummy WGPUSamplerId;
+
+struct WGPUSamplerDescriptor {
+ WGPURawString label;
+ enum WGPUAddressMode address_modes[3];
+ enum WGPUFilterMode mag_filter;
+ enum WGPUFilterMode min_filter;
+ enum WGPUFilterMode mipmap_filter;
+ float lod_min_clamp;
+ float lod_max_clamp;
+ const enum WGPUCompareFunction *compare;
+ WGPUOption_NonZeroU8 anisotropy_clamp;
+};
+
+typedef WGPUNonZeroU64 WGPUId_CommandBuffer_Dummy;
+
+typedef WGPUId_CommandBuffer_Dummy WGPUCommandBufferId;
+
+typedef WGPUCommandBufferId WGPUCommandEncoderId;
+
+/**
+ * Describes a [`CommandEncoder`].
+ */
+struct WGPUCommandEncoderDescriptor {
+ /**
+ * Debug label for the command encoder. This will show up in graphics debuggers for easy identification.
+ */
+ WGPURawString label;
+};
+
+struct WGPUComputePassDescriptor {
+ uint32_t todo;
+};
+
+/**
+ * RGBA double precision color.
+ *
+ * This is not to be used as a generic color type, only for specific wgpu interfaces.
+ */
+struct WGPUColor {
+ double r;
+ double g;
+ double b;
+ double a;
+};
+#define WGPUColor_TRANSPARENT (WGPUColor){ .r = 0.0, .g = 0.0, .b = 0.0, .a = 0.0 }
+#define WGPUColor_BLACK (WGPUColor){ .r = 0.0, .g = 0.0, .b = 0.0, .a = 1.0 }
+#define WGPUColor_WHITE (WGPUColor){ .r = 1.0, .g = 1.0, .b = 1.0, .a = 1.0 }
+#define WGPUColor_RED (WGPUColor){ .r = 1.0, .g = 0.0, .b = 0.0, .a = 1.0 }
+#define WGPUColor_GREEN (WGPUColor){ .r = 0.0, .g = 1.0, .b = 0.0, .a = 1.0 }
+#define WGPUColor_BLUE (WGPUColor){ .r = 0.0, .g = 0.0, .b = 1.0, .a = 1.0 }
+
+/**
+ * Describes an individual channel within a render pass, such as color, depth, or stencil.
+ */
+struct WGPUPassChannel_Color {
+ /**
+ * Operation to perform to the output attachment at the start of a renderpass. This must be clear if it
+ * is the first renderpass rendering to a swap chain image.
+ */
+ enum WGPULoadOp load_op;
+ /**
+ * Operation to perform to the output attachment at the end of a renderpass.
+ */
+ enum WGPUStoreOp store_op;
+ /**
+ * If load_op is [`LoadOp::Clear`], the attachment will be cleared to this color.
+ */
+ struct WGPUColor clear_value;
+ /**
+ * If true, the relevant channel is not changed by a renderpass, and the corresponding attachment
+ * can be used inside the pass by other read-only usages.
+ */
+ bool read_only;
+};
+
+/**
+ * Describes a color attachment to a render pass.
+ */
+struct WGPUColorAttachmentDescriptor {
+ /**
+ * The view to use as an attachment.
+ */
+ WGPUTextureViewId attachment;
+ /**
+ * The view that will receive the resolved output if multisampling is used.
+ */
+ WGPUOption_TextureViewId resolve_target;
+ /**
+ * What operations will be performed on this color attachment.
+ */
+ struct WGPUPassChannel_Color channel;
+};
+
+/**
+ * Describes an individual channel within a render pass, such as color, depth, or stencil.
+ */
+struct WGPUPassChannel_f32 {
+ /**
+ * Operation to perform to the output attachment at the start of a renderpass. This must be clear if it
+ * is the first renderpass rendering to a swap chain image.
+ */
+ enum WGPULoadOp load_op;
+ /**
+ * Operation to perform to the output attachment at the end of a renderpass.
+ */
+ enum WGPUStoreOp store_op;
+ /**
+ * If load_op is [`LoadOp::Clear`], the attachment will be cleared to this value.
+ */
+ float clear_value;
+ /**
+ * If true, the relevant channel is not changed by a renderpass, and the corresponding attachment
+ * can be used inside the pass by other read-only usages.
+ */
+ bool read_only;
+};
+
+/**
+ * Describes an individual channel within a render pass, such as color, depth, or stencil.
+ */
+struct WGPUPassChannel_u32 {
+ /**
+ * Operation to perform to the output attachment at the start of a renderpass. This must be clear if it
+ * is the first renderpass rendering to a swap chain image.
+ */
+ enum WGPULoadOp load_op;
+ /**
+ * Operation to perform to the output attachment at the end of a renderpass.
+ */
+ enum WGPUStoreOp store_op;
+ /**
+ * If load_op is [`LoadOp::Clear`], the attachment will be cleared to this value.
+ */
+ uint32_t clear_value;
+ /**
+ * If true, the relevant channel is not changed by a renderpass, and the corresponding attachment
+ * can be used inside the pass by other read-only usages.
+ */
+ bool read_only;
+};
+
+/**
+ * Describes a depth/stencil attachment to a render pass.
+ */
+struct WGPUDepthStencilAttachmentDescriptor {
+ /**
+ * The view to use as an attachment.
+ */
+ WGPUTextureViewId attachment;
+ /**
+ * What operations will be performed on the depth part of the attachment.
+ */
+ struct WGPUPassChannel_f32 depth;
+ /**
+ * What operations will be performed on the stencil part of the attachment.
+ */
+ struct WGPUPassChannel_u32 stencil;
+};
+
+struct WGPURenderPassDescriptor {
+ const struct WGPUColorAttachmentDescriptor *color_attachments;
+ uintptr_t color_attachments_length;
+ const struct WGPUDepthStencilAttachmentDescriptor *depth_stencil_attachment;
+};
+
+typedef WGPUNonZeroU64 WGPUId_BindGroupLayout_Dummy;
+
+typedef WGPUId_BindGroupLayout_Dummy WGPUBindGroupLayoutId;
+
+typedef WGPUNonZeroU64 WGPUId_PipelineLayout_Dummy;
+
+typedef WGPUId_PipelineLayout_Dummy WGPUPipelineLayoutId;
+
+typedef WGPUNonZeroU64 WGPUId_BindGroup_Dummy;
+
+typedef WGPUId_BindGroup_Dummy WGPUBindGroupId;
+
+typedef WGPUNonZeroU64 WGPUId_ShaderModule_Dummy;
+
+typedef WGPUId_ShaderModule_Dummy WGPUShaderModuleId;
+
+struct WGPUShaderModuleDescriptor {
+ const uint32_t *spirv_words;
+ uintptr_t spirv_words_length;
+ WGPURawString wgsl_chars;
+};
+
+typedef WGPUNonZeroU64 WGPUId_ComputePipeline_Dummy;
+
+typedef WGPUId_ComputePipeline_Dummy WGPUComputePipelineId;
+
+struct WGPUProgrammableStageDescriptor {
+ WGPUShaderModuleId module;
+ WGPURawString entry_point;
+};
+
+struct WGPUComputePipelineDescriptor {
+ WGPURawString label;
+ WGPUOption_PipelineLayoutId layout;
+ struct WGPUProgrammableStageDescriptor compute_stage;
+};
+
+typedef WGPUNonZeroU64 WGPUId_RenderPipeline_Dummy;
+
+typedef WGPUId_RenderPipeline_Dummy WGPURenderPipelineId;
+
+/**
+ * Describes the state of the rasterizer in a render pipeline.
+ */
+struct WGPURasterizationStateDescriptor {
+ enum WGPUFrontFace front_face;
+ enum WGPUCullMode cull_mode;
+ /**
+ * Controls the way each polygon is rasterized. Can be either `Fill` (default), `Line` or `Point`
+ *
+ * Setting this to something other than `Fill` requires `Features::NON_FILL_POLYGON_MODE` to be enabled.
+ */
+ enum WGPUPolygonMode polygon_mode;
+ /**
+ * If enabled polygon depth is clamped to 0-1 range instead of being clipped.
+ *
+ * Requires `Features::DEPTH_CLAMPING` enabled.
+ */
+ bool clamp_depth;
+ int32_t depth_bias;
+ float depth_bias_slope_scale;
+ float depth_bias_clamp;
+};
+
+/**
+ * Describes the blend state of a pipeline.
+ *
+ * Alpha blending is very complicated: see the OpenGL or Vulkan spec for more information.
+ */
+struct WGPUBlendDescriptor {
+ enum WGPUBlendFactor src_factor;
+ enum WGPUBlendFactor dst_factor;
+ enum WGPUBlendOperation operation;
+};
+
+/**
+ * Color write mask. Disabled color channels will not be written to.
+ */
+typedef uint32_t WGPUColorWrite;
+/**
+ * Enable red channel writes
+ */
+#define WGPUColorWrite_RED (uint32_t)1
+/**
+ * Enable green channel writes
+ */
+#define WGPUColorWrite_GREEN (uint32_t)2
+/**
+ * Enable blue channel writes
+ */
+#define WGPUColorWrite_BLUE (uint32_t)4
+/**
+ * Enable alpha channel writes
+ */
+#define WGPUColorWrite_ALPHA (uint32_t)8
+/**
+ * Enable red, green, and blue channel writes
+ */
+#define WGPUColorWrite_COLOR (uint32_t)7
+/**
+ * Enable writes to all channels.
+ */
+#define WGPUColorWrite_ALL (uint32_t)15
+
+/**
+ * Describes the color state of a render pipeline.
+ */
+struct WGPUColorStateDescriptor {
+ /**
+ * The [`TextureFormat`] of the image that this pipeline will render to. Must match the format
+ * of the corresponding color attachment in [`CommandEncoder::begin_render_pass`].
+ */
+ enum WGPUTextureFormat format;
+ /**
+ * The alpha blending that is used for this pipeline.
+ */
+ struct WGPUBlendDescriptor alpha_blend;
+ /**
+ * The color blending that is used for this pipeline.
+ */
+ struct WGPUBlendDescriptor color_blend;
+ /**
+ * Mask which enables/disables writes to different color/alpha channel.
+ */
+ WGPUColorWrite write_mask;
+};
+
+/**
+ * Describes stencil state in a render pipeline.
+ *
+ * If you are not using stencil state, set this to [`StencilStateFaceDescriptor::IGNORE`].
+ */
+struct WGPUStencilStateFaceDescriptor {
+ /**
+ * Comparison function that determines if the fail_op or pass_op is used on the stencil buffer.
+ */
+ enum WGPUCompareFunction compare;
+ /**
+ * Operation that is performed when stencil test fails.
+ */
+ enum WGPUStencilOperation fail_op;
+ /**
+ * Operation that is performed when depth test fails but stencil test succeeds.
+ */
+ enum WGPUStencilOperation depth_fail_op;
+ /**
+ * Operation that is performed when the stencil test succeeds.
+ */
+ enum WGPUStencilOperation pass_op;
+};
+
+/**
+ * Describes the stencil test state of a render pipeline, per polygon face.
+ */
+struct WGPUStencilStateDescriptor {
+ /**
+ * Front face mode.
+ */
+ struct WGPUStencilStateFaceDescriptor front;
+ /**
+ * Back face mode.
+ */
+ struct WGPUStencilStateFaceDescriptor back;
+ /**
+ * Stencil values are AND'd with this mask when reading and writing from the stencil buffer. Only low 8 bits are used.
+ */
+ uint32_t read_mask;
+ /**
+ * Stencil values are AND'd with this mask when writing to the stencil buffer. Only low 8 bits are used.
+ */
+ uint32_t write_mask;
+};
+
+/**
+ * Describes the depth/stencil state in a render pipeline.
+ */
+struct WGPUDepthStencilStateDescriptor {
+ /**
+ * Format of the depth/stencil buffer, must be special depth format. Must match the format
+ * of the depth/stencil attachment in [`CommandEncoder::begin_render_pass`].
+ */
+ enum WGPUTextureFormat format;
+ /**
+ * If disabled, depth will not be written to.
+ */
+ bool depth_write_enabled;
+ /**
+ * Comparison function used to compare depth values in the depth test.
+ */
+ enum WGPUCompareFunction depth_compare;
+ /**
+ * Stencil test configuration for front and back faces.
+ */
+ struct WGPUStencilStateDescriptor stencil;
+};
+
+/**
+ * Integral type used for binding locations in shaders.
+ */
+typedef uint32_t WGPUShaderLocation;
+
+/**
+ * Vertex inputs (attributes) to shaders.
+ *
+ * Arrays of these can be made with the [`vertex_attr_array`] macro. Vertex attributes are assumed to be tightly packed.
+ */
+struct WGPUVertexAttributeDescriptor {
+ /**
+ * Byte offset of the start of the input
+ */
+ WGPUBufferAddress offset;
+ /**
+ * Format of the input
+ */
+ enum WGPUVertexFormat format;
+ /**
+ * Location for this input. Must match the location in the shader.
+ */
+ WGPUShaderLocation shader_location;
+};
+
+/**
+ * Describes how one vertex buffer is interpreted: the byte stride between
+ * elements and the attributes read from each element.
+ */
+struct WGPUVertexBufferDescriptor {
+ /**
+ * Stride, in bytes, between consecutive elements of this buffer.
+ */
+ WGPUBufferAddress stride;
+ /**
+ * How often the buffer is advanced (per vertex or per instance).
+ */
+ enum WGPUInputStepMode step_mode;
+ /**
+ * Pointer to `attributes_length` vertex attribute descriptors.
+ */
+ const struct WGPUVertexAttributeDescriptor *attributes;
+ uintptr_t attributes_length;
+};
+
+/**
+ * Describes the vertex input state of a render pipeline.
+ */
+struct WGPUVertexStateDescriptor {
+ enum WGPUIndexFormat index_format;
+ /**
+ * Pointer to `vertex_buffers_length` vertex buffer descriptors.
+ */
+ const struct WGPUVertexBufferDescriptor *vertex_buffers;
+ uintptr_t vertex_buffers_length;
+};
+
+/**
+ * Describes a render (graphics) pipeline.
+ */
+struct WGPURenderPipelineDescriptor {
+ /**
+ * Debug label of the pipeline.
+ */
+ WGPURawString label;
+ /**
+ * Optional id of the pipeline layout (bind group layouts) for this pipeline.
+ */
+ WGPUOption_PipelineLayoutId layout;
+ /**
+ * The vertex shader stage (module + entry point).
+ */
+ const struct WGPUProgrammableStageDescriptor *vertex_stage;
+ /**
+ * The fragment shader stage. Being a pointer, this looks optional (null for
+ * depth-only pipelines) — NOTE(review): confirm against the deserializer.
+ */
+ const struct WGPUProgrammableStageDescriptor *fragment_stage;
+ enum WGPUPrimitiveTopology primitive_topology;
+ const struct WGPURasterizationStateDescriptor *rasterization_state;
+ /**
+ * Pointer to `color_states_length` color state descriptors, one per color attachment.
+ */
+ const struct WGPUColorStateDescriptor *color_states;
+ uintptr_t color_states_length;
+ /**
+ * Depth/stencil state; a pointer, so presumably null when no depth/stencil attachment is used.
+ */
+ const struct WGPUDepthStencilStateDescriptor *depth_stencil_state;
+ struct WGPUVertexStateDescriptor vertex_state;
+ uint32_t sample_count;
+ uint32_t sample_mask;
+ bool alpha_to_coverage_enabled;
+};
+
+typedef void *WGPUFactoryParam;
+
+typedef WGPUNonZeroU64 WGPUId_SwapChain_Dummy;
+
+typedef WGPUId_SwapChain_Dummy WGPUSwapChainId;
+
+typedef WGPUNonZeroU64 WGPUId_RenderBundle;
+
+typedef WGPUId_RenderBundle WGPURenderBundleId;
+
+typedef WGPUNonZeroU64 WGPUId_Surface;
+
+typedef WGPUId_Surface WGPUSurfaceId;
+
+/**
+ * Table of callbacks through which the server returns (recycles) object ids
+ * when the corresponding objects are freed. Each callback takes the freed id
+ * plus a `WGPUFactoryParam` — judging by the signatures, the `param` field is
+ * handed back to every callback (confirm against the server implementation).
+ */
+struct WGPUIdentityRecyclerFactory {
+ WGPUFactoryParam param;
+ void (*free_adapter)(WGPUAdapterId, WGPUFactoryParam);
+ void (*free_device)(WGPUDeviceId, WGPUFactoryParam);
+ void (*free_swap_chain)(WGPUSwapChainId, WGPUFactoryParam);
+ void (*free_pipeline_layout)(WGPUPipelineLayoutId, WGPUFactoryParam);
+ void (*free_shader_module)(WGPUShaderModuleId, WGPUFactoryParam);
+ void (*free_bind_group_layout)(WGPUBindGroupLayoutId, WGPUFactoryParam);
+ void (*free_bind_group)(WGPUBindGroupId, WGPUFactoryParam);
+ void (*free_command_buffer)(WGPUCommandBufferId, WGPUFactoryParam);
+ void (*free_render_bundle)(WGPURenderBundleId, WGPUFactoryParam);
+ void (*free_render_pipeline)(WGPURenderPipelineId, WGPUFactoryParam);
+ void (*free_compute_pipeline)(WGPUComputePipelineId, WGPUFactoryParam);
+ void (*free_buffer)(WGPUBufferId, WGPUFactoryParam);
+ void (*free_texture)(WGPUTextureId, WGPUFactoryParam);
+ void (*free_texture_view)(WGPUTextureViewId, WGPUFactoryParam);
+ void (*free_sampler)(WGPUSamplerId, WGPUFactoryParam);
+ void (*free_surface)(WGPUSurfaceId, WGPUFactoryParam);
+};
+
+/**
+ * Options for requesting adapter.
+ */
+struct WGPURequestAdapterOptions_SurfaceId {
+ /**
+ * Power preference for the adapter.
+ */
+ enum WGPUPowerPreference power_preference;
+ /**
+ * Surface that is required to be presentable with the requested adapter. This does not
+ * create the surface, only guarantees that the adapter can present to said surface.
+ */
+ WGPUOption_SurfaceId compatible_surface;
+};
+
+typedef struct WGPURequestAdapterOptions_SurfaceId WGPURequestAdapterOptions;
+
+/**
+ * Features that are not guaranteed to be supported.
+ *
+ * These are either part of the webgpu standard, or are extension features supported by
+ * wgpu when targeting native.
+ *
+ * If you want to use a feature, you need to first verify that the adapter supports
+ * the feature. If the adapter does not support the feature, requesting a device with it enabled
+ * will panic.
+ */
+typedef uint64_t WGPUFeatures;
+/**
+ * By default, polygon depth is clipped to 0-1 range. Anything outside of that range
+ * is rejected, and respective fragments are not touched.
+ *
+ * With this extension, we can force clamping of the polygon depth to 0-1. That allows
+ * shadow map occluders to be rendered into a tighter depth range.
+ *
+ * Supported platforms:
+ * - desktops
+ * - some mobile chips
+ *
+ * This is a web and native feature.
+ */
+#define WGPUFeatures_DEPTH_CLAMPING (uint64_t)1
+/**
+ * Enables BCn family of compressed textures. All BCn textures use 4x4 pixel blocks
+ * with 8 or 16 bytes per block.
+ *
+ * Compressed textures sacrifice some quality in exchange for significantly reduced
+ * bandwidth usage.
+ *
+ * Supported Platforms:
+ * - desktops
+ *
+ * This is a web and native feature.
+ */
+#define WGPUFeatures_TEXTURE_COMPRESSION_BC (uint64_t)2
+/**
+ * Webgpu only allows the MAP_READ and MAP_WRITE buffer usage to be matched with
+ * COPY_DST and COPY_SRC respectively. This removes this requirement.
+ *
+ * This is only beneficial on systems that share memory between CPU and GPU. If enabled
+ * on a system that doesn't, this can severely hinder performance. Only use if you understand
+ * the consequences.
+ *
+ * Supported platforms:
+ * - All
+ *
+ * This is a native only feature.
+ */
+#define WGPUFeatures_MAPPABLE_PRIMARY_BUFFERS (uint64_t)65536
+/**
+ * Allows the user to create uniform arrays of sampled textures in shaders:
+ *
+ * eg. `uniform texture2D textures[10]`.
+ *
+ * This capability allows them to exist and to be indexed by compile time constant
+ * values.
+ *
+ * Supported platforms:
+ * - DX12
+ * - Metal (with MSL 2.0+ on macOS 10.13+)
+ * - Vulkan
+ *
+ * This is a native only feature.
+ */
+#define WGPUFeatures_SAMPLED_TEXTURE_BINDING_ARRAY (uint64_t)131072
+/**
+ * Allows shaders to index sampled texture arrays with dynamically uniform values:
+ *
+ * eg. `texture_array[uniform_value]`
+ *
+ * This capability means the hardware will also support SAMPLED_TEXTURE_BINDING_ARRAY.
+ *
+ * Supported platforms:
+ * - DX12
+ * - Metal (with MSL 2.0+ on macOS 10.13+)
+ * - Vulkan's shaderSampledImageArrayDynamicIndexing feature
+ *
+ * This is a native only feature.
+ */
+#define WGPUFeatures_SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING (uint64_t)262144
+/**
+ * Allows shaders to index sampled texture arrays with dynamically non-uniform values:
+ *
+ * eg. `texture_array[vertex_data]`
+ *
+ * In order to use this capability, the corresponding GLSL extension must be enabled like so:
+ *
+ * `#extension GL_EXT_nonuniform_qualifier : require`
+ *
+ * and then used either as `nonuniformEXT` qualifier in variable declaration:
+ *
+ * eg. `layout(location = 0) nonuniformEXT flat in int vertex_data;`
+ *
+ * or as `nonuniformEXT` constructor:
+ *
+ * eg. `texture_array[nonuniformEXT(vertex_data)]`
+ *
+ * HLSL does not need any extension.
+ *
+ * This capability means the hardware will also support SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING
+ * and SAMPLED_TEXTURE_BINDING_ARRAY.
+ *
+ * Supported platforms:
+ * - DX12
+ * - Metal (with MSL 2.0+ on macOS 10.13+)
+ * - Vulkan 1.2+ (or VK_EXT_descriptor_indexing)'s shaderSampledImageArrayNonUniformIndexing feature
+ *
+ * This is a native only feature.
+ */
+#define WGPUFeatures_SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING (uint64_t)524288
+/**
+ * Allows the user to create unsized uniform arrays of bindings:
+ *
+ * eg. `uniform texture2D textures[]`.
+ *
+ * If this capability is supported, SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING is very likely
+ * to also be supported
+ *
+ * Supported platforms:
+ * - DX12
+ * - Vulkan 1.2+ (or VK_EXT_descriptor_indexing)'s runtimeDescriptorArray feature
+ *
+ * This is a native only feature.
+ */
+#define WGPUFeatures_UNSIZED_BINDING_ARRAY (uint64_t)1048576
+/**
+ * Allows the user to call [`RenderPass::multi_draw_indirect`] and [`RenderPass::multi_draw_indexed_indirect`].
+ *
+ * Allows multiple indirect calls to be dispatched from a single buffer.
+ *
+ * Supported platforms:
+ * - DX12
+ * - Metal
+ * - Vulkan
+ *
+ * This is a native only feature.
+ */
+#define WGPUFeatures_MULTI_DRAW_INDIRECT (uint64_t)2097152
+/**
+ * Allows the user to call [`RenderPass::multi_draw_indirect_count`] and [`RenderPass::multi_draw_indexed_indirect_count`].
+ *
+ * This allows the use of a buffer containing the actual number of draw calls.
+ *
+ * Supported platforms:
+ * - DX12
+ * - Vulkan 1.2+ (or VK_KHR_draw_indirect_count)
+ *
+ * This is a native only feature.
+ */
+#define WGPUFeatures_MULTI_DRAW_INDIRECT_COUNT (uint64_t)4194304
+/**
+ * Allows the use of push constants: small, fast bits of memory that can be updated
+ * inside a [`RenderPass`].
+ *
+ * Allows the user to call [`RenderPass::set_push_constants`], provide a non-empty array
+ * to [`PipelineLayoutDescriptor`], and provide a non-zero limit to [`Limits::max_push_constant_size`].
+ *
+ * A block of push constants can be declared with `layout(push_constant) uniform Name {..}` in shaders.
+ *
+ * Supported platforms:
+ * - DX12
+ * - Vulkan
+ * - Metal
+ * - DX11 (emulated with uniforms)
+ * - OpenGL (emulated with uniforms)
+ *
+ * This is a native only feature.
+ */
+#define WGPUFeatures_PUSH_CONSTANTS (uint64_t)8388608
+/**
+ * Allows the use of [`AddressMode::ClampToBorder`].
+ *
+ * Supported platforms:
+ * - DX12
+ * - Vulkan
+ * - Metal (macOS 10.12+ only)
+ * - DX11
+ * - OpenGL
+ *
+ * This is a web and native feature.
+ */
+#define WGPUFeatures_ADDRESS_MODE_CLAMP_TO_BORDER (uint64_t)16777216
+/**
+ * Allows the user to set a non-fill polygon mode in [`RasterizationStateDescriptor::polygon_mode`]
+ *
+ * This allows drawing polygons/triangles as lines (wireframe) or points instead of filled
+ *
+ * Supported platforms:
+ * - DX12
+ * - Vulkan
+ *
+ * This is a native only feature.
+ */
+#define WGPUFeatures_NON_FILL_POLYGON_MODE (uint64_t)33554432
+/**
+ * Features which are part of the upstream WebGPU standard.
+ */
+#define WGPUFeatures_ALL_WEBGPU (uint64_t)65535
+/**
+ * Features that are only available when targeting native (not web).
+ */
+#define WGPUFeatures_ALL_NATIVE (uint64_t)18446744073709486080ULL
+
+/**
+ * Represents the sets of limits an adapter/device supports.
+ *
+ * Limits "better" than the default must be supported by the adapter and requested when requesting
+ * a device. If limits "better" than the adapter supports are requested, requesting a device will panic.
+ * Once a device is requested, you may only use resources up to the limits requested _even_ if the
+ * adapter supports "better" limits.
+ *
+ * Requesting limits that are "better" than you need may cause performance to decrease because the
+ * implementation needs to support more than is needed. You should ideally only request exactly what
+ * you need.
+ *
+ * See also: https://gpuweb.github.io/gpuweb/#dictdef-gpulimits
+ */
+struct WGPULimits {
+ /**
+ * Amount of bind groups that can be attached to a pipeline at the same time. Defaults to 4. Higher is "better".
+ */
+ uint32_t max_bind_groups;
+ /**
+ * Amount of uniform buffer bindings that can be dynamic in a single pipeline. Defaults to 8. Higher is "better".
+ */
+ uint32_t max_dynamic_uniform_buffers_per_pipeline_layout;
+ /**
+ * Amount of storage buffer bindings that can be dynamic in a single pipeline. Defaults to 4. Higher is "better".
+ */
+ uint32_t max_dynamic_storage_buffers_per_pipeline_layout;
+ /**
+ * Amount of sampled textures visible in a single shader stage. Defaults to 16. Higher is "better".
+ */
+ uint32_t max_sampled_textures_per_shader_stage;
+ /**
+ * Amount of samplers visible in a single shader stage. Defaults to 16. Higher is "better".
+ */
+ uint32_t max_samplers_per_shader_stage;
+ /**
+ * Amount of storage buffers visible in a single shader stage. Defaults to 4. Higher is "better".
+ */
+ uint32_t max_storage_buffers_per_shader_stage;
+ /**
+ * Amount of storage textures visible in a single shader stage. Defaults to 4. Higher is "better".
+ */
+ uint32_t max_storage_textures_per_shader_stage;
+ /**
+ * Amount of uniform buffers visible in a single shader stage. Defaults to 12. Higher is "better".
+ */
+ uint32_t max_uniform_buffers_per_shader_stage;
+ /**
+ * Maximum size in bytes of a binding to a uniform buffer. Defaults to 16384. Higher is "better".
+ */
+ uint32_t max_uniform_buffer_binding_size;
+ /**
+ * Amount of storage available for push constants in bytes. Defaults to 0. Higher is "better".
+ * Requesting more than 0 during device creation requires [`Features::PUSH_CONSTANTS`] to be enabled.
+ *
+ * Expect the size to be:
+ * - Vulkan: 128-256 bytes
+ * - DX12: 256 bytes
+ * - Metal: 4096 bytes
+ * - DX11 & OpenGL don't natively support push constants, and are emulated with uniforms,
+ * so this number is less useful.
+ */
+ uint32_t max_push_constant_size;
+};
+
+/**
+ * Describes a [`Device`].
+ */
+struct WGPUDeviceDescriptor {
+ /**
+ * Features that the device should support. If any feature is not supported by
+ * the adapter, creating a device will panic.
+ */
+ WGPUFeatures features;
+ /**
+ * Limits that the device should support. If any limit is "better" than the limit exposed by
+ * the adapter, creating a device will panic.
+ */
+ struct WGPULimits limits;
+ /**
+ * Switch shader validation on/off. This is a temporary field
+ * that will be removed once our validation logic is complete.
+ */
+ bool shader_validation;
+};
+
+typedef void (*WGPUBufferMapCallback)(enum WGPUBufferMapAsyncStatus status, uint8_t *userdata);
+
+/**
+ * Parameters for an asynchronous buffer map request (see `wgpu_server_buffer_map`).
+ */
+struct WGPUBufferMapOperation {
+ /**
+ * Intended host access for the mapped range (read or write).
+ */
+ enum WGPUHostMap host;
+ /**
+ * Invoked when the map request resolves, with the resulting status.
+ */
+ WGPUBufferMapCallback callback;
+ /**
+ * Opaque pointer — presumably forwarded as the callback's `userdata`
+ * argument; confirm against the server implementation.
+ */
+ uint8_t *user_data;
+};
+
+/**
+ * Describes a [`CommandBuffer`].
+ */
+struct WGPUCommandBufferDescriptor {
+ WGPURawString label;
+};
+
+/**
+ * Origin of a copy to/from a texture.
+ */
+struct WGPUOrigin3d {
+ uint32_t x;
+ uint32_t y;
+ uint32_t z;
+};
+#define WGPUOrigin3d_ZERO (WGPUOrigin3d){ .x = 0, .y = 0, .z = 0 }
+
+/**
+ * View of a texture which can be used to copy to/from a buffer/texture.
+ */
+struct WGPUTextureCopyView_TextureId {
+ /**
+ * The texture to be copied to/from.
+ */
+ WGPUTextureId texture;
+ /**
+ * The target mip level of the texture.
+ */
+ uint32_t mip_level;
+ /**
+ * The base texel of the texture in the selected `mip_level`.
+ */
+ struct WGPUOrigin3d origin;
+};
+
+typedef struct WGPUTextureCopyView_TextureId WGPUTextureCopyView;
+
+/**
+ * Layout of a texture in a buffer's memory.
+ */
+struct WGPUTextureDataLayout {
+ /**
+ * Offset into the buffer that is the start of the texture. Must be a multiple of the texture
+ * block size (the block size is 1 for non-compressed textures).
+ */
+ WGPUBufferAddress offset;
+ /**
+ * Bytes per "row" of the image. This represents one row of pixels in the x direction. Compressed
+ * textures include multiple rows of pixels in each "row". May be 0 for 1D texture copies.
+ *
+ * Must be a multiple of 256 for [`CommandEncoder::copy_buffer_to_texture`] and [`CommandEncoder::copy_texture_to_buffer`].
+ * [`Queue::write_texture`] does not have this requirement.
+ *
+ * Must be a multiple of the texture block size. For non-compressed textures, this is 1.
+ */
+ uint32_t bytes_per_row;
+ /**
+ * Rows that make up a single "image". Each "image" is one layer in the z direction of a 3D image. May be larger
+ * than `copy_size.y`.
+ *
+ * May be 0 for 2D texture copies.
+ */
+ uint32_t rows_per_image;
+};
+
+/**
+ * View of a buffer which can be used to copy to/from a texture.
+ */
+struct WGPUBufferCopyView_BufferId {
+ /**
+ * The buffer to be copied to/from.
+ */
+ WGPUBufferId buffer;
+ /**
+ * The layout of the texture data in this buffer.
+ */
+ struct WGPUTextureDataLayout layout;
+};
+
+typedef struct WGPUBufferCopyView_BufferId WGPUBufferCopyView;
+
+typedef WGPUDeviceId WGPUQueueId;
+
+/**
+ * Describes the shader stages that a binding will be visible from.
+ *
+ * These can be combined so something that is visible from both vertex and fragment shaders can be defined as:
+ *
+ * `ShaderStage::VERTEX | ShaderStage::FRAGMENT`
+ */
+typedef uint32_t WGPUShaderStage;
+/**
+ * Binding is not visible from any shader stage.
+ */
+#define WGPUShaderStage_NONE (uint32_t)0
+/**
+ * Binding is visible from the vertex shader of a render pipeline.
+ */
+#define WGPUShaderStage_VERTEX (uint32_t)1
+/**
+ * Binding is visible from the fragment shader of a render pipeline.
+ */
+#define WGPUShaderStage_FRAGMENT (uint32_t)2
+/**
+ * Binding is visible from the compute shader of a compute pipeline.
+ */
+#define WGPUShaderStage_COMPUTE (uint32_t)4
+
+typedef uint32_t WGPURawEnumOption_TextureViewDimension;
+
+typedef uint32_t WGPURawEnumOption_TextureComponentType;
+
+typedef uint32_t WGPURawEnumOption_TextureFormat;
+
+/**
+ * Describes one entry (binding slot) of a bind group layout.
+ */
+struct WGPUBindGroupLayoutEntry {
+ /**
+ * Binding index, matching the shader's binding declaration.
+ */
+ uint32_t binding;
+ /**
+ * Shader stages from which this binding is visible (bitmask of WGPUShaderStage_*).
+ */
+ WGPUShaderStage visibility;
+ /**
+ * Kind of resource bound at this slot.
+ */
+ enum WGPURawBindingType ty;
+ bool has_dynamic_offset;
+ WGPUOption_BufferSize min_binding_size;
+ /**
+ * The remaining fields look type-dependent (texture/storage-texture only):
+ * NOTE(review): confirm which `ty` values consume which fields.
+ */
+ WGPURawEnumOption_TextureViewDimension view_dimension;
+ WGPURawEnumOption_TextureComponentType texture_component_type;
+ bool multisampled;
+ WGPURawEnumOption_TextureFormat storage_texture_format;
+};
+
+/**
+ * Describes a bind group layout: a label plus `entries_length` layout entries.
+ */
+struct WGPUBindGroupLayoutDescriptor {
+ WGPURawString label;
+ const struct WGPUBindGroupLayoutEntry *entries;
+ uintptr_t entries_length;
+};
+
+/**
+ * One resource bound into a bind group. `buffer`, `sampler` and
+ * `texture_view` are all optional ids; presumably exactly one is set,
+ * according to the matching layout entry's type — confirm server-side.
+ */
+struct WGPUBindGroupEntry {
+ uint32_t binding;
+ WGPUOption_BufferId buffer;
+ /**
+ * Offset/size apply to buffer bindings.
+ */
+ WGPUBufferAddress offset;
+ WGPUOption_BufferSize size;
+ WGPUOption_SamplerId sampler;
+ WGPUOption_TextureViewId texture_view;
+};
+
+/**
+ * Describes a bind group: the layout it conforms to plus `entries_length` resource entries.
+ */
+struct WGPUBindGroupDescriptor {
+ WGPURawString label;
+ WGPUBindGroupLayoutId layout;
+ const struct WGPUBindGroupEntry *entries;
+ uintptr_t entries_length;
+};
+
+/**
+ * Integral type used for dynamic bind group offsets.
+ */
+typedef uint32_t WGPUDynamicOffset;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ * Bound uniform/storage buffer offsets must be aligned to this number.
+ */
+#define WGPUBIND_BUFFER_ALIGNMENT 256
+
+/**
+ * Buffer to buffer copy offsets and sizes must be aligned to this number.
+ */
+#define WGPUCOPY_BUFFER_ALIGNMENT 4
+
+/**
+ * Vertex buffer strides have to be aligned to this number.
+ */
+#define WGPUVERTEX_STRIDE_ALIGNMENT 4
+
+WGPU_INLINE
+struct WGPUInfrastructure wgpu_client_new(void)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe because improper use may lead to memory
+ * problems. For example, a double-free may occur if the function is called
+ * twice on the same raw pointer.
+ */
+WGPU_INLINE
+void wgpu_client_delete(struct WGPUClient *aClient)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that the given pointer is
+ * valid for `id_length` elements.
+ */
+WGPU_INLINE
+uintptr_t wgpu_client_make_adapter_ids(const struct WGPUClient *aClient,
+ WGPUAdapterId *aIds,
+ uintptr_t aIdLength)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_adapter_id(const struct WGPUClient *aClient,
+ WGPUAdapterId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUDeviceId wgpu_client_make_device_id(const struct WGPUClient *aClient,
+ WGPUAdapterId aAdapterId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_device_id(const struct WGPUClient *aClient,
+ WGPUDeviceId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUBufferId wgpu_client_make_buffer_id(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUBufferId wgpu_client_create_buffer(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId,
+ const struct WGPUBufferDescriptor *aDesc,
+ WGPUByteBuf *aBb)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_buffer_id(const struct WGPUClient *aClient,
+ WGPUBufferId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUTextureId wgpu_client_create_texture(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId,
+ const struct WGPUTextureDescriptor *aDesc,
+ WGPUByteBuf *aBb)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_texture_id(const struct WGPUClient *aClient,
+ WGPUTextureId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUTextureViewId wgpu_client_create_texture_view(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId,
+ const struct WGPUTextureViewDescriptor *aDesc,
+ WGPUByteBuf *aBb)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_texture_view_id(const struct WGPUClient *aClient,
+ WGPUTextureViewId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUSamplerId wgpu_client_create_sampler(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId,
+ const struct WGPUSamplerDescriptor *aDesc,
+ WGPUByteBuf *aBb)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_sampler_id(const struct WGPUClient *aClient,
+ WGPUSamplerId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUCommandEncoderId wgpu_client_create_command_encoder(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId,
+ const struct WGPUCommandEncoderDescriptor *aDesc,
+ WGPUByteBuf *aBb)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_encoder_id(const struct WGPUClient *aClient,
+ WGPUCommandEncoderId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+struct WGPUComputePass *wgpu_command_encoder_begin_compute_pass(WGPUCommandEncoderId aEncoderId,
+ const struct WGPUComputePassDescriptor *aDesc)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_compute_pass_finish(const struct WGPUComputePass *aPass,
+ WGPUByteBuf *aOutput)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_compute_pass_destroy(struct WGPUComputePass *aPass)
+WGPU_FUNC;
+
+WGPU_INLINE
+struct WGPURenderPass *wgpu_command_encoder_begin_render_pass(WGPUCommandEncoderId aEncoderId,
+ const struct WGPURenderPassDescriptor *aDesc)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_finish(const struct WGPURenderPass *aPass,
+ WGPUByteBuf *aOutput)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_destroy(struct WGPURenderPass *aPass)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUBindGroupLayoutId wgpu_client_make_bind_group_layout_id(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_bind_group_layout_id(const struct WGPUClient *aClient,
+ WGPUBindGroupLayoutId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUPipelineLayoutId wgpu_client_make_pipeline_layout_id(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_pipeline_layout_id(const struct WGPUClient *aClient,
+ WGPUPipelineLayoutId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUBindGroupId wgpu_client_make_bind_group_id(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_bind_group_id(const struct WGPUClient *aClient,
+ WGPUBindGroupId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUShaderModuleId wgpu_client_create_shader_module(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId,
+ const struct WGPUShaderModuleDescriptor *aDesc,
+ WGPUByteBuf *aBb)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_shader_module_id(const struct WGPUClient *aClient,
+ WGPUShaderModuleId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPUComputePipelineId wgpu_client_create_compute_pipeline(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId,
+ const struct WGPUComputePipelineDescriptor *aDesc,
+ WGPUByteBuf *aBb)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_compute_pipeline_id(const struct WGPUClient *aClient,
+ WGPUComputePipelineId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+WGPURenderPipelineId wgpu_client_create_render_pipeline(const struct WGPUClient *aClient,
+ WGPUDeviceId aDeviceId,
+ const struct WGPURenderPipelineDescriptor *aDesc,
+ WGPUByteBuf *aBb)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_client_kill_render_pipeline_id(const struct WGPUClient *aClient,
+ WGPURenderPipelineId aId)
+WGPU_FUNC;
+
+WGPU_INLINE
+struct WGPUGlobal *wgpu_server_new(struct WGPUIdentityRecyclerFactory aFactory)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe because improper use may lead to memory
+ * problems. For example, a double-free may occur if the function is called
+ * twice on the same raw pointer.
+ */
+WGPU_INLINE
+void wgpu_server_delete(struct WGPUGlobal *aGlobal)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_poll_all_devices(const struct WGPUGlobal *aGlobal,
+ bool aForceWait)
+WGPU_FUNC;
+
+/**
+ * Request an adapter according to the specified options.
+ * Provide the list of IDs to pick from.
+ *
+ * Returns the index in this list, or -1 if unable to pick.
+ *
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that the given pointer is
+ * valid for `id_length` elements.
+ */
+WGPU_INLINE
+int8_t wgpu_server_instance_request_adapter(const struct WGPUGlobal *aGlobal,
+ const WGPURequestAdapterOptions *aDesc,
+ const WGPUAdapterId *aIds,
+ uintptr_t aIdLength)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_adapter_request_device(const struct WGPUGlobal *aGlobal,
+ WGPUAdapterId aSelfId,
+ const struct WGPUDeviceDescriptor *aDesc,
+ WGPUDeviceId aNewId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_adapter_drop(const struct WGPUGlobal *aGlobal,
+ WGPUAdapterId aAdapterId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_device_drop(const struct WGPUGlobal *aGlobal,
+ WGPUDeviceId aSelfId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_device_create_buffer(const struct WGPUGlobal *aGlobal,
+ WGPUDeviceId aSelfId,
+ const struct WGPUBufferDescriptor *aDesc,
+ WGPUBufferId aNewId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_buffer_map(const struct WGPUGlobal *aGlobal,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aStart,
+ WGPUBufferAddress aSize,
+ struct WGPUBufferMapOperation aOperation)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that the given pointer is
+ * valid for `size` elements.
+ */
+WGPU_INLINE
+uint8_t *wgpu_server_buffer_get_mapped_range(const struct WGPUGlobal *aGlobal,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aStart,
+ WGPUOption_BufferSize aSize)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_buffer_unmap(const struct WGPUGlobal *aGlobal,
+ WGPUBufferId aBufferId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_buffer_drop(const struct WGPUGlobal *aGlobal,
+ WGPUBufferId aSelfId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_device_create_encoder(const struct WGPUGlobal *aGlobal,
+ WGPUDeviceId aSelfId,
+ const struct WGPUCommandEncoderDescriptor *aDesc,
+ WGPUCommandEncoderId aNewId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_encoder_finish(const struct WGPUGlobal *aGlobal,
+ WGPUCommandEncoderId aSelfId,
+ const struct WGPUCommandBufferDescriptor *aDesc)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_encoder_drop(const struct WGPUGlobal *aGlobal,
+ WGPUCommandEncoderId aSelfId)
+WGPU_FUNC;
+
+/**
+ * Drops the server-side command buffer with the given id.
+ *
+ * NOTE(review): the previous safety note referring to a `byte_length`
+ * pointer was stale — this function takes no pointer arguments.
+ */
+WGPU_INLINE
+void wgpu_server_command_buffer_drop(const struct WGPUGlobal *aGlobal,
+ WGPUCommandBufferId aSelfId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_encoder_copy_buffer_to_buffer(const struct WGPUGlobal *aGlobal,
+ WGPUCommandEncoderId aSelfId,
+ WGPUBufferId aSourceId,
+ WGPUBufferAddress aSourceOffset,
+ WGPUBufferId aDestinationId,
+ WGPUBufferAddress aDestinationOffset,
+ WGPUBufferAddress aSize)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_encoder_copy_texture_to_buffer(const struct WGPUGlobal *aGlobal,
+ WGPUCommandEncoderId aSelfId,
+ const WGPUTextureCopyView *aSource,
+ const WGPUBufferCopyView *aDestination,
+ const struct WGPUExtent3d *aSize)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_encoder_copy_buffer_to_texture(const struct WGPUGlobal *aGlobal,
+ WGPUCommandEncoderId aSelfId,
+ const WGPUBufferCopyView *aSource,
+ const WGPUTextureCopyView *aDestination,
+ const struct WGPUExtent3d *aSize)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_encoder_copy_texture_to_texture(const struct WGPUGlobal *aGlobal,
+ WGPUCommandEncoderId aSelfId,
+ const WGPUTextureCopyView *aSource,
+ const WGPUTextureCopyView *aDestination,
+ const struct WGPUExtent3d *aSize)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that `aByteBuf` points to
+ * a valid serialized compute-pass byte buffer (as produced by
+ * `wgpu_compute_pass_finish`).
+ */
+WGPU_INLINE
+void wgpu_server_encode_compute_pass(const struct WGPUGlobal *aGlobal,
+ WGPUCommandEncoderId aSelfId,
+ const WGPUByteBuf *aByteBuf)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that `aPass` is a valid
+ * pointer to a render pass recorded via
+ * `wgpu_command_encoder_begin_render_pass`.
+ */
+WGPU_INLINE
+void wgpu_server_encode_render_pass(const struct WGPUGlobal *aGlobal,
+ WGPUCommandEncoderId aSelfId,
+ const struct WGPURenderPass *aPass)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that the given pointer is
+ * valid for `command_buffer_id_length` elements.
+ */
+WGPU_INLINE
+void wgpu_server_queue_submit(const struct WGPUGlobal *aGlobal,
+ WGPUQueueId aSelfId,
+ const WGPUCommandBufferId *aCommandBufferIds,
+ uintptr_t aCommandBufferIdLength)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that the given pointer is
+ * valid for `data_length` elements.
+ */
+WGPU_INLINE
+void wgpu_server_queue_write_buffer(const struct WGPUGlobal *aGlobal,
+ WGPUQueueId aSelfId,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aBufferOffset,
+ const uint8_t *aData,
+ uintptr_t aDataLength)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that the given pointer is
+ * valid for `data_length` elements.
+ */
+WGPU_INLINE
+void wgpu_server_queue_write_texture(const struct WGPUGlobal *aGlobal,
+ WGPUQueueId aSelfId,
+ const WGPUTextureCopyView *aDestination,
+ const uint8_t *aData,
+ uintptr_t aDataLength,
+ const struct WGPUTextureDataLayout *aLayout,
+ const struct WGPUExtent3d *aExtent)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that the given pointer is
+ * valid for `entries_length` elements.
+ */
+WGPU_INLINE
+void wgpu_server_device_create_bind_group_layout(const struct WGPUGlobal *aGlobal,
+ WGPUDeviceId aSelfId,
+ const struct WGPUBindGroupLayoutDescriptor *aDesc,
+ WGPUBindGroupLayoutId aNewId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_bind_group_layout_drop(const struct WGPUGlobal *aGlobal,
+ WGPUBindGroupLayoutId aSelfId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_device_create_pipeline_layout(const struct WGPUGlobal *aGlobal,
+ WGPUDeviceId aSelfId,
+ const struct WGPUPipelineLayoutDescriptor *aDesc,
+ WGPUPipelineLayoutId aNewId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_pipeline_layout_drop(const struct WGPUGlobal *aGlobal,
+ WGPUPipelineLayoutId aSelfId)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that the given pointer is
+ * valid for `entries_length` elements.
+ */
+WGPU_INLINE
+void wgpu_server_device_create_bind_group(const struct WGPUGlobal *aGlobal,
+ WGPUDeviceId aSelfId,
+ const struct WGPUBindGroupDescriptor *aDesc,
+ WGPUBindGroupId aNewId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_bind_group_drop(const struct WGPUGlobal *aGlobal,
+ WGPUBindGroupId aSelfId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_shader_module_drop(const struct WGPUGlobal *aGlobal,
+ WGPUShaderModuleId aSelfId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_compute_pipeline_drop(const struct WGPUGlobal *aGlobal,
+ WGPUComputePipelineId aSelfId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_render_pipeline_drop(const struct WGPUGlobal *aGlobal,
+ WGPURenderPipelineId aSelfId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_device_create_texture(const struct WGPUGlobal *aGlobal,
+ WGPUDeviceId aSelfId,
+ const struct WGPUTextureDescriptor *aDesc,
+ WGPUTextureId aNewId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_texture_create_view(const struct WGPUGlobal *aGlobal,
+ WGPUTextureId aSelfId,
+ const struct WGPUTextureViewDescriptor *aDesc,
+ WGPUTextureViewId aNewId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_texture_drop(const struct WGPUGlobal *aGlobal,
+ WGPUTextureId aSelfId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_texture_view_drop(const struct WGPUGlobal *aGlobal,
+ WGPUTextureViewId aSelfId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_device_create_sampler(const struct WGPUGlobal *aGlobal,
+ WGPUDeviceId aSelfId,
+ const struct WGPUSamplerDescriptor *aDesc,
+ WGPUSamplerId aNewId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_server_sampler_drop(const struct WGPUGlobal *aGlobal,
+ WGPUSamplerId aSelfId)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that the given pointer is
+ * valid for `offset_length` elements.
+ */
+WGPU_INLINE
+void wgpu_render_bundle_set_bind_group(struct WGPURenderBundleEncoder *aBundle,
+ uint32_t aIndex,
+ WGPUBindGroupId aBindGroupId,
+ const WGPUDynamicOffset *aOffsets,
+ uintptr_t aOffsetLength)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_bundle_set_pipeline(struct WGPURenderBundleEncoder *aBundle,
+ WGPURenderPipelineId aPipelineId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_bundle_set_index_buffer(struct WGPURenderBundleEncoder *aBundle,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset,
+ WGPUOption_BufferSize aSize)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_bundle_set_vertex_buffer(struct WGPURenderBundleEncoder *aBundle,
+ uint32_t aSlot,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset,
+ WGPUOption_BufferSize aSize)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_bundle_set_push_constants(struct WGPURenderBundleEncoder *aPass,
+ WGPUShaderStage aStages,
+ uint32_t aOffset,
+ uint32_t aSizeBytes,
+ const uint8_t *aData)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_bundle_draw(struct WGPURenderBundleEncoder *aBundle,
+ uint32_t aVertexCount,
+ uint32_t aInstanceCount,
+ uint32_t aFirstVertex,
+ uint32_t aFirstInstance)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_bundle_draw_indexed(struct WGPURenderBundleEncoder *aBundle,
+ uint32_t aIndexCount,
+ uint32_t aInstanceCount,
+ uint32_t aFirstIndex,
+ int32_t aBaseVertex,
+ uint32_t aFirstInstance)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_bundle_draw_indirect(struct WGPURenderBundleEncoder *aBundle,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_bundle_indexed_indirect(struct WGPURenderBundleEncoder *aBundle,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_bundle_push_debug_group(struct WGPURenderBundleEncoder *aBundle,
+ WGPURawString aLabel)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_bundle_pop_debug_group(struct WGPURenderBundleEncoder *aBundle)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_bundle_insert_debug_marker(struct WGPURenderBundleEncoder *aBundle,
+ WGPURawString aLabel)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that the given pointer is
+ * valid for `offset_length` elements.
+ */
+WGPU_INLINE
+void wgpu_compute_pass_set_bind_group(struct WGPUComputePass *aPass,
+ uint32_t aIndex,
+ WGPUBindGroupId aBindGroupId,
+ const WGPUDynamicOffset *aOffsets,
+ uintptr_t aOffsetLength)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_compute_pass_set_pipeline(struct WGPUComputePass *aPass,
+ WGPUComputePipelineId aPipelineId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_compute_pass_set_push_constant(struct WGPUComputePass *aPass,
+ uint32_t aOffset,
+ uint32_t aSizeBytes,
+ const uint8_t *aData)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_compute_pass_dispatch(struct WGPUComputePass *aPass,
+ uint32_t aGroupsX,
+ uint32_t aGroupsY,
+ uint32_t aGroupsZ)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_compute_pass_dispatch_indirect(struct WGPUComputePass *aPass,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_compute_pass_push_debug_group(struct WGPUComputePass *aPass,
+ WGPURawString aLabel,
+ uint32_t aColor)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_compute_pass_pop_debug_group(struct WGPUComputePass *aPass)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_compute_pass_insert_debug_marker(struct WGPUComputePass *aPass,
+ WGPURawString aLabel,
+ uint32_t aColor)
+WGPU_FUNC;
+
+/**
+ * # Safety
+ *
+ * This function is unsafe as there is no guarantee that the given pointer is
+ * valid for `offset_length` elements.
+ */
+WGPU_INLINE
+void wgpu_render_pass_set_bind_group(struct WGPURenderPass *aPass,
+ uint32_t aIndex,
+ WGPUBindGroupId aBindGroupId,
+ const WGPUDynamicOffset *aOffsets,
+ uintptr_t aOffsetLength)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_set_pipeline(struct WGPURenderPass *aPass,
+ WGPURenderPipelineId aPipelineId)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_set_index_buffer(struct WGPURenderPass *aPass,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset,
+ WGPUOption_BufferSize aSize)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_set_vertex_buffer(struct WGPURenderPass *aPass,
+ uint32_t aSlot,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset,
+ WGPUOption_BufferSize aSize)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_set_blend_color(struct WGPURenderPass *aPass,
+ const struct WGPUColor *aColor)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_set_stencil_reference(struct WGPURenderPass *aPass,
+ uint32_t aValue)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_set_viewport(struct WGPURenderPass *aPass,
+ float aX,
+ float aY,
+ float aW,
+ float aH,
+ float aDepthMin,
+ float aDepthMax)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_set_scissor_rect(struct WGPURenderPass *aPass,
+ uint32_t aX,
+ uint32_t aY,
+ uint32_t aW,
+ uint32_t aH)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_set_push_constants(struct WGPURenderPass *aPass,
+ WGPUShaderStage aStages,
+ uint32_t aOffset,
+ uint32_t aSizeBytes,
+ const uint8_t *aData)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_draw(struct WGPURenderPass *aPass,
+ uint32_t aVertexCount,
+ uint32_t aInstanceCount,
+ uint32_t aFirstVertex,
+ uint32_t aFirstInstance)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_draw_indexed(struct WGPURenderPass *aPass,
+ uint32_t aIndexCount,
+ uint32_t aInstanceCount,
+ uint32_t aFirstIndex,
+ int32_t aBaseVertex,
+ uint32_t aFirstInstance)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_draw_indirect(struct WGPURenderPass *aPass,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_draw_indexed_indirect(struct WGPURenderPass *aPass,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_multi_draw_indirect(struct WGPURenderPass *aPass,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset,
+ uint32_t aCount)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_multi_draw_indexed_indirect(struct WGPURenderPass *aPass,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset,
+ uint32_t aCount)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_multi_draw_indirect_count(struct WGPURenderPass *aPass,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset,
+ WGPUBufferId aCountBufferId,
+ WGPUBufferAddress aCountBufferOffset,
+ uint32_t aMaxCount)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_multi_draw_indexed_indirect_count(struct WGPURenderPass *aPass,
+ WGPUBufferId aBufferId,
+ WGPUBufferAddress aOffset,
+ WGPUBufferId aCountBufferId,
+ WGPUBufferAddress aCountBufferOffset,
+ uint32_t aMaxCount)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_push_debug_group(struct WGPURenderPass *aPass,
+ WGPURawString aLabel,
+ uint32_t aColor)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_pop_debug_group(struct WGPURenderPass *aPass)
+WGPU_FUNC;
+
+WGPU_INLINE
+void wgpu_render_pass_insert_debug_marker(struct WGPURenderPass *aPass,
+ WGPURawString aLabel,
+ uint32_t aColor)
+WGPU_FUNC;
diff --git a/dom/webgpu/ipc/PWebGPU.ipdl b/dom/webgpu/ipc/PWebGPU.ipdl
new file mode 100644
index 0000000000..98bece449d
--- /dev/null
+++ b/dom/webgpu/ipc/PWebGPU.ipdl
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: sw=2 ts=8 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+using layers::RGBDescriptor from "mozilla/layers/LayersSurfaces.h";
+using wr::ExternalImageId from "mozilla/webrender/WebRenderAPI.h";
+using RawId from "mozilla/webgpu/WebGPUTypes.h";
+using BufferAddress from "mozilla/webgpu/WebGPUTypes.h";
+using dom::GPURequestAdapterOptions from "mozilla/dom/WebGPUBinding.h";
+using dom::GPUDeviceDescriptor from "mozilla/dom/WebGPUBinding.h";
+using dom::GPUCommandBufferDescriptor from "mozilla/dom/WebGPUBinding.h";
+using webgpu::ffi::WGPUTextureDataLayout from "mozilla/webgpu/ffi/wgpu.h";
+using webgpu::ffi::WGPUTextureCopyView from "mozilla/webgpu/ffi/wgpu.h";
+using webgpu::ffi::WGPUExtent3d from "mozilla/webgpu/ffi/wgpu.h";
+using webgpu::ffi::WGPUHostMap from "mozilla/webgpu/ffi/wgpu.h";
+
+include "mozilla/ipc/ByteBufUtils.h";
+include "mozilla/webgpu/WebGPUSerialize.h";
+include "mozilla/layers/WebRenderMessageUtils.h";
+include protocol PCompositorBridge;
+
+namespace mozilla {
+namespace webgpu {
+
+/**
+ * Represents the connection between a WebGPUChild actor that issues WebGPU
+ * command from the content process, and a WebGPUParent in the compositor
+ * process that runs the commands.
+ */
+async protocol PWebGPU
+{
+  manager PCompositorBridge;
+
+parent:
+  // Opaque, serialized device/texture/encoder actions. The payload is a
+  // ByteBuf produced by the wgpu client library (see WebGPUSerialize.h).
+  async DeviceAction(RawId selfId, ByteBuf buf);
+  async TextureAction(RawId selfId, RawId aDeviceId, ByteBuf buf);
+  async CommandEncoderAction(RawId selfId, RawId aDeviceId, ByteBuf buf);
+  // Associates a client-allocated ID with an implicitly created bind group
+  // layout of the given pipeline.
+  async BumpImplicitBindGroupLayout(RawId pipelineId, bool isCompute, uint32_t index, RawId assignId);
+
+  // Adapter/device lifecycle. `ids` are client-pre-allocated candidates;
+  // the parent resolves with the chosen adapter ID (0 on failure).
+  async InstanceRequestAdapter(GPURequestAdapterOptions options, RawId[] ids) returns (RawId adapterId);
+  async AdapterRequestDevice(RawId selfId, GPUDeviceDescriptor desc, RawId newId);
+  async AdapterDestroy(RawId selfId);
+  // Buffer mapping: shmem travels back and forth between the processes.
+  async BufferReturnShmem(RawId selfId, Shmem shmem);
+  async BufferMap(RawId selfId, WGPUHostMap hostMap, uint64_t offset, uint64_t size) returns (Shmem sm);
+  async BufferUnmap(RawId selfId, Shmem shmem, bool flush);
+  async BufferDestroy(RawId selfId);
+  async TextureDestroy(RawId selfId);
+  async TextureViewDestroy(RawId selfId);
+  async SamplerDestroy(RawId selfId);
+  async DeviceDestroy(RawId selfId);
+
+  // Command recording and queue submission.
+  async CommandEncoderFinish(RawId selfId, RawId deviceId, GPUCommandBufferDescriptor desc);
+  async CommandEncoderDestroy(RawId selfId);
+  async CommandBufferDestroy(RawId selfId);
+  async QueueSubmit(RawId selfId, RawId[] commandBuffers);
+  async QueueWriteBuffer(RawId selfId, RawId bufferId, BufferAddress bufferOffset, Shmem shmem);
+  async QueueWriteTexture(RawId selfId, WGPUTextureCopyView destination, Shmem shmem, WGPUTextureDataLayout layout, WGPUExtent3d extent);
+
+  async BindGroupLayoutDestroy(RawId selfId);
+  async PipelineLayoutDestroy(RawId selfId);
+  async BindGroupDestroy(RawId selfId);
+  async ShaderModuleDestroy(RawId selfId);
+  async ComputePipelineDestroy(RawId selfId);
+  async RenderPipelineDestroy(RawId selfId);
+  // Swap chain plumbing: buffers are identified for WebRender by an
+  // ExternalImageId.
+  async DeviceCreateSwapChain(RawId selfId, RawId queueId, RGBDescriptor desc, RawId[] bufferIds, ExternalImageId externalId);
+  async SwapChainPresent(ExternalImageId externalId, RawId textureId, RawId commandEncoderId);
+  async SwapChainDestroy(ExternalImageId externalId);
+
+  async Shutdown();
+
+child:
+  // Error reporting and ID reclamation: the parent tells the content process
+  // which client-allocated IDs can be recycled.
+  async Error(RawId aDeviceId, nsCString message);
+  async DropAction(ByteBuf buf);
+  async FreeAdapter(RawId id);
+  async FreeDevice(RawId id);
+  async FreePipelineLayout(RawId id);
+  async FreeShaderModule(RawId id);
+  async FreeBindGroupLayout(RawId id);
+  async FreeBindGroup(RawId id);
+  async FreeCommandBuffer(RawId id);
+  async FreeRenderPipeline(RawId id);
+  async FreeComputePipeline(RawId id);
+  async FreeBuffer(RawId id);
+  async FreeTexture(RawId id);
+  async FreeTextureView(RawId id);
+  async FreeSampler(RawId id);
+  async __delete__();
+};
+
+} // webgpu
+} // mozilla
diff --git a/dom/webgpu/ipc/WebGPUChild.cpp b/dom/webgpu/ipc/WebGPUChild.cpp
new file mode 100644
index 0000000000..3ba2f84eb0
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUChild.cpp
@@ -0,0 +1,697 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebGPUChild.h"
+#include "mozilla/EnumTypeTraits.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/dom/GPUUncapturedErrorEvent.h"
+#include "mozilla/webgpu/ValidationError.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "Sampler.h"
+
+namespace mozilla {
+namespace webgpu {
+
+NS_IMPL_CYCLE_COLLECTION(WebGPUChild)
+NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(WebGPUChild, AddRef)
+NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(WebGPUChild, Release)
+
+// Translates a WebIDL GPUCompareFunction into the FFI enum, which is offset
+// by one (see the comment on the return statement).
+static ffi::WGPUCompareFunction ConvertCompareFunction(
+    const dom::GPUCompareFunction& aCompare) {
+  // Value of 0 = Undefined is reserved on the C side for "null" semantics.
+  return ffi::WGPUCompareFunction(UnderlyingValue(aCompare) + 1);
+}
+
+// Creates the wgpu client-side infrastructure and returns its client handle.
+// Ownership passes to WebGPUChild; released via wgpu_client_delete in the
+// destructor.
+static ffi::WGPUClient* initialize() {
+  ffi::WGPUInfrastructure infra = ffi::wgpu_client_new();
+  return infra.client;
+}
+
+// The client is created eagerly at construction; mIPCOpen starts out false.
+WebGPUChild::WebGPUChild() : mClient(initialize()), mIPCOpen(false) {}
+
+WebGPUChild::~WebGPUChild() {
+  // mClient may be null if wgpu_client_new() failed to provide one.
+  if (mClient) {
+    ffi::wgpu_client_delete(mClient);
+  }
+}
+
+// Asks the parent process to pick an adapter. A fixed batch of candidate
+// adapter IDs is pre-allocated on the client and sent along; the parent
+// answers with the chosen ID, where 0 is the sentinel for "no adapter found"
+// and is turned into a rejection carrying Nothing().
+RefPtr<RawIdPromise> WebGPUChild::InstanceRequestAdapter(
+    const dom::GPURequestAdapterOptions& aOptions) {
+  const int max_ids = 10;
+  RawId ids[max_ids] = {0};
+  unsigned long count =
+      ffi::wgpu_client_make_adapter_ids(mClient, ids, max_ids);
+
+  // Copy only the IDs the client actually produced.
+  nsTArray<RawId> sharedIds(count);
+  for (unsigned long i = 0; i != count; ++i) {
+    sharedIds.AppendElement(ids[i]);
+  }
+
+  return SendInstanceRequestAdapter(aOptions, sharedIds)
+      ->Then(
+          GetCurrentSerialEventTarget(), __func__,
+          [](const RawId& aId) {
+            // 0 == no acceptable adapter on the parent side.
+            return aId == 0 ? RawIdPromise::CreateAndReject(Nothing(), __func__)
+                            : RawIdPromise::CreateAndResolve(aId, __func__);
+          },
+          [](const ipc::ResponseRejectReason& aReason) {
+            // IPC-level failure (actor torn down, etc.).
+            return RawIdPromise::CreateAndReject(Some(aReason), __func__);
+          });
+}
+
+// Allocates a device ID for the adapter and asks the parent to create the
+// device. On IPC send failure the ID is reclaimed and Nothing() is returned.
+Maybe<RawId> WebGPUChild::AdapterRequestDevice(
+    RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc) {
+  RawId id = ffi::wgpu_client_make_device_id(mClient, aSelfId);
+  if (SendAdapterRequestDevice(aSelfId, aDesc, id)) {
+    return Some(id);
+  }
+  ffi::wgpu_client_kill_device_id(mClient, id);
+  return Nothing();
+}
+
+// Serializes a buffer creation into a ByteBuf on the client side and forwards
+// it to the parent as a DeviceAction. Returns the client-allocated buffer ID.
+RawId WebGPUChild::DeviceCreateBuffer(RawId aSelfId,
+                                      const dom::GPUBufferDescriptor& aDesc) {
+  ffi::WGPUBufferDescriptor desc = {};
+  // `label` must outlive `desc`, which keeps a raw pointer into it.
+  nsCString label;
+  if (aDesc.mLabel.WasPassed()) {
+    LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+    desc.label = label.get();
+  }
+  desc.size = aDesc.mSize;
+  desc.usage = aDesc.mUsage;
+  desc.mapped_at_creation = aDesc.mMappedAtCreation;
+
+  ByteBuf bb;
+  RawId id =
+      ffi::wgpu_client_create_buffer(mClient, aSelfId, &desc, ToFFI(&bb));
+  if (!SendDeviceAction(aSelfId, std::move(bb))) {
+    MOZ_CRASH("IPC failure");
+  }
+  return id;
+}
+
+// Converts the WebIDL texture descriptor into its FFI form and sends the
+// serialized creation to the parent. Returns the client-allocated texture ID.
+RawId WebGPUChild::DeviceCreateTexture(RawId aSelfId,
+                                       const dom::GPUTextureDescriptor& aDesc) {
+  ffi::WGPUTextureDescriptor desc = {};
+  // `label` must outlive `desc`, which keeps a raw pointer into it.
+  nsCString label;
+  if (aDesc.mLabel.WasPassed()) {
+    LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+    desc.label = label.get();
+  }
+  // GPUExtent3D is a union: either a sequence (missing trailing components
+  // default to 1) or an explicit dictionary.
+  if (aDesc.mSize.IsRangeEnforcedUnsignedLongSequence()) {
+    const auto& seq = aDesc.mSize.GetAsRangeEnforcedUnsignedLongSequence();
+    desc.size.width = seq.Length() > 0 ? seq[0] : 1;
+    desc.size.height = seq.Length() > 1 ? seq[1] : 1;
+    desc.size.depth = seq.Length() > 2 ? seq[2] : 1;
+  } else if (aDesc.mSize.IsGPUExtent3DDict()) {
+    const auto& dict = aDesc.mSize.GetAsGPUExtent3DDict();
+    desc.size.width = dict.mWidth;
+    desc.size.height = dict.mHeight;
+    desc.size.depth = dict.mDepth;
+  } else {
+    MOZ_CRASH("Unexpected union");
+  }
+  desc.mip_level_count = aDesc.mMipLevelCount;
+  desc.sample_count = aDesc.mSampleCount;
+  desc.dimension = ffi::WGPUTextureDimension(aDesc.mDimension);
+  desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
+  desc.usage = aDesc.mUsage;
+
+  ByteBuf bb;
+  RawId id =
+      ffi::wgpu_client_create_texture(mClient, aSelfId, &desc, ToFFI(&bb));
+  if (!SendDeviceAction(aSelfId, std::move(bb))) {
+    MOZ_CRASH("IPC failure");
+  }
+  return id;
+}
+
+// Builds the FFI texture-view descriptor and sends the serialized creation as
+// a TextureAction. Optional WebIDL fields become nullable pointers on the FFI
+// side; the pointed-to locals stay alive until the FFI call returns.
+RawId WebGPUChild::TextureCreateView(
+    RawId aSelfId, RawId aDeviceId,
+    const dom::GPUTextureViewDescriptor& aDesc) {
+  ffi::WGPUTextureViewDescriptor desc = {};
+  // `label` must outlive `desc`, which keeps a raw pointer into it.
+  nsCString label;
+  if (aDesc.mLabel.WasPassed()) {
+    LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+    desc.label = label.get();
+  }
+
+  // Not-passed format/dimension are expressed as null pointers in `desc`.
+  ffi::WGPUTextureFormat format = ffi::WGPUTextureFormat_Sentinel;
+  if (aDesc.mFormat.WasPassed()) {
+    format = ffi::WGPUTextureFormat(aDesc.mFormat.Value());
+    desc.format = &format;
+  }
+  ffi::WGPUTextureViewDimension dimension =
+      ffi::WGPUTextureViewDimension_Sentinel;
+  if (aDesc.mDimension.WasPassed()) {
+    dimension = ffi::WGPUTextureViewDimension(aDesc.mDimension.Value());
+    desc.dimension = &dimension;
+  }
+
+  desc.aspect = ffi::WGPUTextureAspect(aDesc.mAspect);
+  desc.base_mip_level = aDesc.mBaseMipLevel;
+  // 0 encodes "count not passed"; presumably interpreted by the server as
+  // "all remaining levels/layers" — confirm against wgpu-core.
+  desc.level_count =
+      aDesc.mMipLevelCount.WasPassed() ? aDesc.mMipLevelCount.Value() : 0;
+  desc.base_array_layer = aDesc.mBaseArrayLayer;
+  desc.array_layer_count =
+      aDesc.mArrayLayerCount.WasPassed() ? aDesc.mArrayLayerCount.Value() : 0;
+
+  ByteBuf bb;
+  RawId id =
+      ffi::wgpu_client_create_texture_view(mClient, aSelfId, &desc, ToFFI(&bb));
+  if (!SendTextureAction(aSelfId, aDeviceId, std::move(bb))) {
+    MOZ_CRASH("IPC failure");
+  }
+  return id;
+}
+
+// Converts the WebIDL sampler descriptor to its FFI form and sends the
+// serialized creation to the parent. Returns the client-allocated sampler ID.
+RawId WebGPUChild::DeviceCreateSampler(RawId aSelfId,
+                                       const dom::GPUSamplerDescriptor& aDesc) {
+  ffi::WGPUSamplerDescriptor desc = {};
+  // `label` must outlive `desc`, which keeps a raw pointer into it.
+  nsCString label;
+  if (aDesc.mLabel.WasPassed()) {
+    LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+    desc.label = label.get();
+  }
+
+  // Address modes are packed as [U, V, W].
+  desc.address_modes[0] = ffi::WGPUAddressMode(aDesc.mAddressModeU);
+  desc.address_modes[1] = ffi::WGPUAddressMode(aDesc.mAddressModeV);
+  desc.address_modes[2] = ffi::WGPUAddressMode(aDesc.mAddressModeW);
+  desc.mag_filter = ffi::WGPUFilterMode(aDesc.mMagFilter);
+  desc.min_filter = ffi::WGPUFilterMode(aDesc.mMinFilter);
+  desc.mipmap_filter = ffi::WGPUFilterMode(aDesc.mMipmapFilter);
+  desc.lod_min_clamp = aDesc.mLodMinClamp;
+  desc.lod_max_clamp = aDesc.mLodMaxClamp;
+
+  // Optional compare function: a null pointer means "no comparison sampler".
+  ffi::WGPUCompareFunction comparison = ffi::WGPUCompareFunction_Sentinel;
+  if (aDesc.mCompare.WasPassed()) {
+    comparison = ConvertCompareFunction(aDesc.mCompare.Value());
+    desc.compare = &comparison;
+  }
+
+  ByteBuf bb;
+  RawId id =
+      ffi::wgpu_client_create_sampler(mClient, aSelfId, &desc, ToFFI(&bb));
+  if (!SendDeviceAction(aSelfId, std::move(bb))) {
+    MOZ_CRASH("IPC failure");
+  }
+  return id;
+}
+
+// Creates a command encoder on the client, serializing the action for the
+// parent. Returns the client-allocated encoder ID.
+RawId WebGPUChild::DeviceCreateCommandEncoder(
+    RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc) {
+  ffi::WGPUCommandEncoderDescriptor desc = {};
+  // `label` must outlive `desc`, which keeps a raw pointer into it.
+  nsCString label;
+  if (aDesc.mLabel.WasPassed()) {
+    LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+    desc.label = label.get();
+  }
+
+  ByteBuf bb;
+  RawId id = ffi::wgpu_client_create_command_encoder(mClient, aSelfId, &desc,
+                                                     ToFFI(&bb));
+  if (!SendDeviceAction(aSelfId, std::move(bb))) {
+    MOZ_CRASH("IPC failure");
+  }
+  return id;
+}
+
+// Tells the parent to finish the encoder into a command buffer. No new ID is
+// allocated — see the comment below on the encoder/buffer ID aliasing.
+RawId WebGPUChild::CommandEncoderFinish(
+    RawId aSelfId, RawId aDeviceId,
+    const dom::GPUCommandBufferDescriptor& aDesc) {
+  if (!SendCommandEncoderFinish(aSelfId, aDeviceId, aDesc)) {
+    MOZ_CRASH("IPC failure");
+  }
+  // We rely on knowledge that `CommandEncoderId` == `CommandBufferId`
+  // TODO: refactor this to truly behave as if the encoder is being finished,
+  // and a new command buffer ID is being created from it. Resolve the ID
+  // type aliasing at the place that introduces it: `wgpu-core`.
+  return aSelfId;
+}
+
+// Converts the WebIDL bind-group-layout descriptor to its FFI form and sends
+// the serialized creation to the parent. Optional per-entry fields are stored
+// in a side array (`optional`) so the FFI entries can point at them; that
+// array is pre-sized to its final capacity, so the pointers taken below are
+// never invalidated by reallocation.
+RawId WebGPUChild::DeviceCreateBindGroupLayout(
+    RawId aSelfId, const dom::GPUBindGroupLayoutDescriptor& aDesc) {
+  struct OptionalData {
+    ffi::WGPUTextureViewDimension dim;
+    ffi::WGPURawTextureSampleType type;
+    ffi::WGPUTextureFormat format;
+  };
+  nsTArray<OptionalData> optional(aDesc.mEntries.Length());
+  for (const auto& entry : aDesc.mEntries) {
+    OptionalData data = {};
+    if (entry.mViewDimension.WasPassed()) {
+      data.dim = ffi::WGPUTextureViewDimension(entry.mViewDimension.Value());
+    }
+    if (entry.mTextureComponentType.WasPassed()) {
+      switch (entry.mTextureComponentType.Value()) {
+        case dom::GPUTextureComponentType::Float:
+          data.type = ffi::WGPURawTextureSampleType_Float;
+          break;
+        case dom::GPUTextureComponentType::Uint:
+          data.type = ffi::WGPURawTextureSampleType_Uint;
+          break;
+        case dom::GPUTextureComponentType::Sint:
+          data.type = ffi::WGPURawTextureSampleType_Sint;
+          break;
+        case dom::GPUTextureComponentType::Depth_comparison:
+          data.type = ffi::WGPURawTextureSampleType_Depth;
+          break;
+        default:
+          // All enum values are covered above; give the assert a reason so a
+          // violation is diagnosable (MOZ_ASSERT_UNREACHABLE takes one).
+          MOZ_ASSERT_UNREACHABLE("unexpected GPUTextureComponentType");
+          break;
+      }
+    }
+    if (entry.mStorageTextureFormat.WasPassed()) {
+      data.format = ffi::WGPUTextureFormat(entry.mStorageTextureFormat.Value());
+    }
+    optional.AppendElement(data);
+  }
+
+  // Second pass: build the FFI entries, pointing optional fields into
+  // `optional[i]` for those that were passed.
+  nsTArray<ffi::WGPUBindGroupLayoutEntry> entries(aDesc.mEntries.Length());
+  for (size_t i = 0; i < aDesc.mEntries.Length(); ++i) {
+    const auto& entry = aDesc.mEntries[i];
+    ffi::WGPUBindGroupLayoutEntry e = {};
+    e.binding = entry.mBinding;
+    e.visibility = entry.mVisibility;
+    e.ty = ffi::WGPURawBindingType(entry.mType);
+    e.multisampled = entry.mMultisampled;
+    e.has_dynamic_offset = entry.mHasDynamicOffset;
+    if (entry.mViewDimension.WasPassed()) {
+      e.view_dimension = &optional[i].dim;
+    }
+    if (entry.mTextureComponentType.WasPassed()) {
+      e.texture_sample_type = &optional[i].type;
+    }
+    if (entry.mStorageTextureFormat.WasPassed()) {
+      e.storage_texture_format = &optional[i].format;
+    }
+    entries.AppendElement(e);
+  }
+
+  ffi::WGPUBindGroupLayoutDescriptor desc = {};
+  // `label` must outlive `desc`, which keeps a raw pointer into it.
+  nsCString label;
+  if (aDesc.mLabel.WasPassed()) {
+    LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+    desc.label = label.get();
+  }
+  desc.entries = entries.Elements();
+  desc.entries_length = entries.Length();
+
+  ByteBuf bb;
+  RawId id = ffi::wgpu_client_create_bind_group_layout(mClient, aSelfId, &desc,
+                                                       ToFFI(&bb));
+  if (!SendDeviceAction(aSelfId, std::move(bb))) {
+    MOZ_CRASH("IPC failure");
+  }
+  return id;
+}
+
+// Collects the bind-group-layout IDs, builds the FFI pipeline-layout
+// descriptor, and sends the serialized creation to the parent.
+RawId WebGPUChild::DeviceCreatePipelineLayout(
+    RawId aSelfId, const dom::GPUPipelineLayoutDescriptor& aDesc) {
+  nsTArray<ffi::WGPUBindGroupLayoutId> bindGroupLayouts(
+      aDesc.mBindGroupLayouts.Length());
+  for (const auto& layout : aDesc.mBindGroupLayouts) {
+    bindGroupLayouts.AppendElement(layout->mId);
+  }
+
+  ffi::WGPUPipelineLayoutDescriptor desc = {};
+  // `label` must outlive `desc`, which keeps a raw pointer into it.
+  nsCString label;
+  if (aDesc.mLabel.WasPassed()) {
+    LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+    desc.label = label.get();
+  }
+  desc.bind_group_layouts = bindGroupLayouts.Elements();
+  desc.bind_group_layouts_length = bindGroupLayouts.Length();
+
+  ByteBuf bb;
+  RawId id = ffi::wgpu_client_create_pipeline_layout(mClient, aSelfId, &desc,
+                                                     ToFFI(&bb));
+  if (!SendDeviceAction(aSelfId, std::move(bb))) {
+    MOZ_CRASH("IPC failure");
+  }
+  return id;
+}
+
+// Flattens each WebIDL bind-group entry (buffer binding, texture view, or
+// sampler) into an FFI entry and sends the serialized creation to the parent.
+RawId WebGPUChild::DeviceCreateBindGroup(
+    RawId aSelfId, const dom::GPUBindGroupDescriptor& aDesc) {
+  nsTArray<ffi::WGPUBindGroupEntry> entries(aDesc.mEntries.Length());
+  for (const auto& entry : aDesc.mEntries) {
+    ffi::WGPUBindGroupEntry e = {};
+    e.binding = entry.mBinding;
+    if (entry.mResource.IsGPUBufferBinding()) {
+      const auto& bufBinding = entry.mResource.GetAsGPUBufferBinding();
+      e.buffer = bufBinding.mBuffer->mId;
+      e.offset = bufBinding.mOffset;
+      // 0 encodes "size not passed"; presumably meaning "to the end of the
+      // buffer" on the server side — confirm against wgpu-core.
+      e.size = bufBinding.mSize.WasPassed() ? bufBinding.mSize.Value() : 0;
+    }
+    if (entry.mResource.IsGPUTextureView()) {
+      e.texture_view = entry.mResource.GetAsGPUTextureView()->mId;
+    }
+    if (entry.mResource.IsGPUSampler()) {
+      e.sampler = entry.mResource.GetAsGPUSampler()->mId;
+    }
+    entries.AppendElement(e);
+  }
+
+  ffi::WGPUBindGroupDescriptor desc = {};
+  // `label` must outlive `desc`, which keeps a raw pointer into it.
+  nsCString label;
+  if (aDesc.mLabel.WasPassed()) {
+    LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+    desc.label = label.get();
+  }
+  desc.layout = aDesc.mLayout->mId;
+  desc.entries = entries.Elements();
+  desc.entries_length = entries.Length();
+
+  ByteBuf bb;
+  RawId id =
+      ffi::wgpu_client_create_bind_group(mClient, aSelfId, &desc, ToFFI(&bb));
+  if (!SendDeviceAction(aSelfId, std::move(bb))) {
+    MOZ_CRASH("IPC failure");
+  }
+  return id;
+}
+
+// Accepts shader source either as a WGSL string or a SPIR-V word array and
+// sends the serialized module creation to the parent.
+RawId WebGPUChild::DeviceCreateShaderModule(
+    RawId aSelfId, const dom::GPUShaderModuleDescriptor& aDesc) {
+  ffi::WGPUShaderModuleDescriptor desc = {};
+
+  // `wgsl` must outlive `desc`, which keeps a raw pointer into it.
+  nsCString wgsl;
+  if (aDesc.mCode.IsString()) {
+    LossyCopyUTF16toASCII(aDesc.mCode.GetAsString(), wgsl);
+    desc.wgsl_chars = wgsl.get();
+  } else {
+    const auto& code = aDesc.mCode.GetAsUint32Array();
+    // ComputeState() must be called before Data()/Length() on typed arrays.
+    code.ComputeState();
+    desc.spirv_words = code.Data();
+    desc.spirv_words_length = code.Length();
+  }
+
+  ByteBuf bb;
+  RawId id = ffi::wgpu_client_create_shader_module(mClient, aSelfId, &desc,
+                                                   ToFFI(&bb));
+  if (!SendDeviceAction(aSelfId, std::move(bb))) {
+    MOZ_CRASH("IPC failure");
+  }
+  return id;
+}
+
+// Builds the FFI compute-pipeline descriptor and sends the serialized
+// creation. IDs for implicitly created bind group layouts come back from the
+// client in a zero-terminated fixed array and are appended to
+// aImplicitBindGroupLayoutIds.
+RawId WebGPUChild::DeviceCreateComputePipeline(
+    RawId aSelfId, const dom::GPUComputePipelineDescriptor& aDesc,
+    nsTArray<RawId>* const aImplicitBindGroupLayoutIds) {
+  ffi::WGPUComputePipelineDescriptor desc = {};
+  // The strings must outlive `desc`, which keeps raw pointers into them.
+  nsCString label, entryPoint;
+  if (aDesc.mLabel.WasPassed()) {
+    LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+    desc.label = label.get();
+  }
+  if (aDesc.mLayout.WasPassed()) {
+    desc.layout = aDesc.mLayout.Value().mId;
+  }
+  desc.compute_stage.module = aDesc.mComputeStage.mModule->mId;
+  LossyCopyUTF16toASCII(aDesc.mComputeStage.mEntryPoint, entryPoint);
+  desc.compute_stage.entry_point = entryPoint.get();
+
+  ByteBuf bb;
+  RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {};
+  RawId id = ffi::wgpu_client_create_compute_pipeline(
+      mClient, aSelfId, &desc, ToFFI(&bb), implicit_bgl_ids);
+
+  // A zero ID terminates the list of implicit bind group layouts.
+  for (const auto& cur : implicit_bgl_ids) {
+    if (!cur) break;
+    aImplicitBindGroupLayoutIds->AppendElement(cur);
+  }
+  if (!SendDeviceAction(aSelfId, std::move(bb))) {
+    MOZ_CRASH("IPC failure");
+  }
+  return id;
+}
+
+// Field-by-field conversion of the WebIDL rasterization state to FFI form.
+static ffi::WGPURasterizationStateDescriptor ConvertRasterizationDescriptor(
+    const dom::GPURasterizationStateDescriptor& aDesc) {
+  ffi::WGPURasterizationStateDescriptor desc = {};
+  desc.front_face = ffi::WGPUFrontFace(aDesc.mFrontFace);
+  desc.cull_mode = ffi::WGPUCullMode(aDesc.mCullMode);
+  desc.depth_bias = aDesc.mDepthBias;
+  desc.depth_bias_slope_scale = aDesc.mDepthBiasSlopeScale;
+  desc.depth_bias_clamp = aDesc.mDepthBiasClamp;
+  return desc;
+}
+
+// Field-by-field conversion of a WebIDL blend descriptor to FFI form.
+static ffi::WGPUBlendDescriptor ConvertBlendDescriptor(
+    const dom::GPUBlendDescriptor& aDesc) {
+  ffi::WGPUBlendDescriptor desc = {};
+  desc.src_factor = ffi::WGPUBlendFactor(aDesc.mSrcFactor);
+  desc.dst_factor = ffi::WGPUBlendFactor(aDesc.mDstFactor);
+  desc.operation = ffi::WGPUBlendOperation(aDesc.mOperation);
+  return desc;
+}
+
+// Converts a WebIDL color state (format, alpha/color blend, write mask) to
+// FFI form.
+static ffi::WGPUColorStateDescriptor ConvertColorDescriptor(
+    const dom::GPUColorStateDescriptor& aDesc) {
+  ffi::WGPUColorStateDescriptor desc = {};
+  desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
+  desc.alpha_blend = ConvertBlendDescriptor(aDesc.mAlphaBlend);
+  desc.color_blend = ConvertBlendDescriptor(aDesc.mColorBlend);
+  desc.write_mask = aDesc.mWriteMask;
+  return desc;
+}
+
+// Converts the per-face stencil operations of a WebIDL descriptor to FFI
+// form.
+static ffi::WGPUStencilStateFaceDescriptor ConvertStencilFaceDescriptor(
+    const dom::GPUStencilStateFaceDescriptor& aDesc) {
+  ffi::WGPUStencilStateFaceDescriptor desc = {};
+  desc.compare = ConvertCompareFunction(aDesc.mCompare);
+  desc.fail_op = ffi::WGPUStencilOperation(aDesc.mFailOp);
+  desc.depth_fail_op = ffi::WGPUStencilOperation(aDesc.mDepthFailOp);
+  desc.pass_op = ffi::WGPUStencilOperation(aDesc.mPassOp);
+  return desc;
+}
+
+// Converts the WebIDL depth/stencil state — including both stencil faces and
+// the read/write masks — to FFI form.
+static ffi::WGPUDepthStencilStateDescriptor ConvertDepthStencilDescriptor(
+    const dom::GPUDepthStencilStateDescriptor& aDesc) {
+  ffi::WGPUDepthStencilStateDescriptor desc = {};
+  desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
+  desc.depth_write_enabled = aDesc.mDepthWriteEnabled;
+  desc.depth_compare = ConvertCompareFunction(aDesc.mDepthCompare);
+  desc.stencil.front = ConvertStencilFaceDescriptor(aDesc.mStencilFront);
+  desc.stencil.back = ConvertStencilFaceDescriptor(aDesc.mStencilBack);
+  desc.stencil.read_mask = aDesc.mStencilReadMask;
+  desc.stencil.write_mask = aDesc.mStencilWriteMask;
+  return desc;
+}
+
+RawId WebGPUChild::DeviceCreateRenderPipeline(
+ RawId aSelfId, const dom::GPURenderPipelineDescriptor& aDesc,
+ nsTArray<RawId>* const aImplicitBindGroupLayoutIds) {
+ ffi::WGPURenderPipelineDescriptor desc = {};
+ nsCString label, vsEntry, fsEntry;
+ ffi::WGPUProgrammableStageDescriptor vertexStage = {};
+ ffi::WGPUProgrammableStageDescriptor fragmentStage = {};
+
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+ if (aDesc.mLayout.WasPassed()) {
+ desc.layout = aDesc.mLayout.Value().mId;
+ }
+
+ vertexStage.module = aDesc.mVertexStage.mModule->mId;
+ LossyCopyUTF16toASCII(aDesc.mVertexStage.mEntryPoint, vsEntry);
+ vertexStage.entry_point = vsEntry.get();
+ desc.vertex_stage = &vertexStage;
+
+ if (aDesc.mFragmentStage.WasPassed()) {
+ const auto& stage = aDesc.mFragmentStage.Value();
+ fragmentStage.module = stage.mModule->mId;
+ LossyCopyUTF16toASCII(stage.mEntryPoint, fsEntry);
+ fragmentStage.entry_point = fsEntry.get();
+ desc.fragment_stage = &fragmentStage;
+ }
+
+ desc.primitive_topology =
+ ffi::WGPUPrimitiveTopology(aDesc.mPrimitiveTopology);
+ const auto rasterization =
+ ConvertRasterizationDescriptor(aDesc.mRasterizationState);
+ desc.rasterization_state = &rasterization;
+
+ nsTArray<ffi::WGPUColorStateDescriptor> colorStates;
+ for (const auto& colorState : aDesc.mColorStates) {
+ colorStates.AppendElement(ConvertColorDescriptor(colorState));
+ }
+ desc.color_states = colorStates.Elements();
+ desc.color_states_length = colorStates.Length();
+
+ ffi::WGPUDepthStencilStateDescriptor depthStencilState = {};
+ if (aDesc.mDepthStencilState.WasPassed()) {
+ depthStencilState =
+ ConvertDepthStencilDescriptor(aDesc.mDepthStencilState.Value());
+ desc.depth_stencil_state = &depthStencilState;
+ }
+
+ desc.vertex_state.index_format =
+ ffi::WGPUIndexFormat(aDesc.mVertexState.mIndexFormat);
+ nsTArray<ffi::WGPUVertexBufferDescriptor> vertexBuffers;
+ nsTArray<ffi::WGPUVertexAttributeDescriptor> vertexAttributes;
+ for (const auto& vertex_desc : aDesc.mVertexState.mVertexBuffers) {
+ ffi::WGPUVertexBufferDescriptor vb_desc = {};
+ if (!vertex_desc.IsNull()) {
+ const auto& vd = vertex_desc.Value();
+ vb_desc.stride = vd.mArrayStride;
+ vb_desc.step_mode = ffi::WGPUInputStepMode(vd.mStepMode);
+ // Note: we are setting the length but not the pointer
+ vb_desc.attributes_length = vd.mAttributes.Length();
+ for (const auto& vat : vd.mAttributes) {
+ ffi::WGPUVertexAttributeDescriptor ad = {};
+ ad.offset = vat.mOffset;
+ ad.format = ffi::WGPUVertexFormat(vat.mFormat);
+ ad.shader_location = vat.mShaderLocation;
+ vertexAttributes.AppendElement(ad);
+ }
+ }
+ vertexBuffers.AppendElement(vb_desc);
+ }
+ // Now patch up all the pointers to attribute lists.
+ size_t numAttributes = 0;
+ for (auto& vb_desc : vertexBuffers) {
+ vb_desc.attributes = vertexAttributes.Elements() + numAttributes;
+ numAttributes += vb_desc.attributes_length;
+ }
+
+ desc.vertex_state.vertex_buffers = vertexBuffers.Elements();
+ desc.vertex_state.vertex_buffers_length = vertexBuffers.Length();
+ desc.sample_count = aDesc.mSampleCount;
+ desc.sample_mask = aDesc.mSampleMask;
+ desc.alpha_to_coverage_enabled = aDesc.mAlphaToCoverageEnabled;
+
+ ByteBuf bb;
+ RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {};
+ RawId id = ffi::wgpu_client_create_render_pipeline(
+ mClient, aSelfId, &desc, ToFFI(&bb), implicit_bgl_ids);
+
+ for (const auto& cur : implicit_bgl_ids) {
+ if (!cur) break;
+ aImplicitBindGroupLayoutIds->AppendElement(cur);
+ }
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+ipc::IPCResult WebGPUChild::RecvError(RawId aDeviceId,
+ const nsACString& aMessage) {
+ if (!aDeviceId) {
+ // TODO: figure out how to report these kinds of errors
+ printf_stderr("Validation error without device target: %s\n",
+ PromiseFlatCString(aMessage).get());
+ } else if (mDeviceMap.find(aDeviceId) == mDeviceMap.end()) {
+ printf_stderr("Validation error on a dropped device: %s\n",
+ PromiseFlatCString(aMessage).get());
+ } else {
+ auto* target = mDeviceMap[aDeviceId];
+ MOZ_ASSERT(target);
+ dom::GPUUncapturedErrorEventInit init;
+ init.mError.SetAsGPUValidationError() =
+ new ValidationError(target, aMessage);
+ RefPtr<mozilla::dom::GPUUncapturedErrorEvent> event =
+ dom::GPUUncapturedErrorEvent::Constructor(target, u"uncapturederror"_ns,
+ init);
+ target->DispatchEvent(*event);
+ }
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUChild::RecvDropAction(const ipc::ByteBuf& aByteBuf) {
+ const auto* byteBuf = ToFFI(&aByteBuf);
+ ffi::wgpu_client_drop_action(mClient, byteBuf);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUChild::RecvFreeAdapter(RawId id) {
+ ffi::wgpu_client_kill_adapter_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeDevice(RawId id) {
+ ffi::wgpu_client_kill_device_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreePipelineLayout(RawId id) {
+ ffi::wgpu_client_kill_pipeline_layout_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeShaderModule(RawId id) {
+ ffi::wgpu_client_kill_shader_module_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeBindGroupLayout(RawId id) {
+ ffi::wgpu_client_kill_bind_group_layout_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeBindGroup(RawId id) {
+ ffi::wgpu_client_kill_bind_group_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeCommandBuffer(RawId id) {
+ ffi::wgpu_client_kill_encoder_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeRenderPipeline(RawId id) {
+ ffi::wgpu_client_kill_render_pipeline_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeComputePipeline(RawId id) {
+ ffi::wgpu_client_kill_compute_pipeline_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeBuffer(RawId id) {
+ ffi::wgpu_client_kill_buffer_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeTexture(RawId id) {
+ ffi::wgpu_client_kill_texture_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeTextureView(RawId id) {
+ ffi::wgpu_client_kill_texture_view_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeSampler(RawId id) {
+ ffi::wgpu_client_kill_sampler_id(mClient, id);
+ return IPC_OK();
+}
+
+void WebGPUChild::DeviceCreateSwapChain(RawId aSelfId,
+ const RGBDescriptor& aRgbDesc,
+ size_t maxBufferCount,
+ wr::ExternalImageId aExternalImageId) {
+ RawId queueId = aSelfId; // TODO: multiple queues
+ nsTArray<RawId> bufferIds(maxBufferCount);
+ for (size_t i = 0; i < maxBufferCount; ++i) {
+ bufferIds.AppendElement(ffi::wgpu_client_make_buffer_id(mClient, aSelfId));
+ }
+ SendDeviceCreateSwapChain(aSelfId, queueId, aRgbDesc, bufferIds,
+ aExternalImageId);
+}
+
+void WebGPUChild::SwapChainPresent(wr::ExternalImageId aExternalImageId,
+ RawId aTextureId) {
+ // Hack: the function expects `DeviceId`, but it only uses it for `backend()`
+ // selection.
+ RawId encoderId = ffi::wgpu_client_make_encoder_id(mClient, aTextureId);
+ SendSwapChainPresent(aExternalImageId, aTextureId, encoderId);
+}
+
+void WebGPUChild::RegisterDevice(RawId aId, Device* aDevice) {
+ mDeviceMap.insert({aId, aDevice});
+}
+
+void WebGPUChild::UnregisterDevice(RawId aId) {
+ mDeviceMap.erase(aId);
+ SendDeviceDestroy(aId);
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ipc/WebGPUChild.h b/dom/webgpu/ipc/WebGPUChild.h
new file mode 100644
index 0000000000..d2bedbe4c1
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUChild.h
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_CHILD_H_
+#define WEBGPU_CHILD_H_
+
+#include "mozilla/webgpu/PWebGPUChild.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/WeakPtr.h"
+
+namespace mozilla {
+namespace dom {
+struct GPURequestAdapterOptions;
+} // namespace dom
+namespace layers {
+class CompositorBridgeChild;
+} // namespace layers
+namespace webgpu {
+namespace ffi {
+struct WGPUClient;
+struct WGPUTextureViewDescriptor;
+} // namespace ffi
+
+typedef MozPromise<RawId, Maybe<ipc::ResponseRejectReason>, true> RawIdPromise;
+
+ffi::WGPUByteBuf* ToFFI(ipc::ByteBuf* x);
+
+class WebGPUChild final : public PWebGPUChild, public SupportsWeakPtr {
+ public:
+ friend class layers::CompositorBridgeChild;
+
+ NS_DECL_CYCLE_COLLECTION_NATIVE_CLASS(WebGPUChild)
+ NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(WebGPUChild)
+
+ public:
+ explicit WebGPUChild();
+
+ bool IsOpen() const { return mIPCOpen; }
+
+ RefPtr<RawIdPromise> InstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions);
+ Maybe<RawId> AdapterRequestDevice(RawId aSelfId,
+ const dom::GPUDeviceDescriptor& aDesc);
+ RawId DeviceCreateBuffer(RawId aSelfId,
+ const dom::GPUBufferDescriptor& aDesc);
+ RawId DeviceCreateTexture(RawId aSelfId,
+ const dom::GPUTextureDescriptor& aDesc);
+ RawId TextureCreateView(RawId aSelfId, RawId aDeviceId,
+ const dom::GPUTextureViewDescriptor& aDesc);
+ RawId DeviceCreateSampler(RawId aSelfId,
+ const dom::GPUSamplerDescriptor& aDesc);
+ RawId DeviceCreateCommandEncoder(
+ RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc);
+ RawId CommandEncoderFinish(RawId aSelfId, RawId aDeviceId,
+ const dom::GPUCommandBufferDescriptor& aDesc);
+ RawId DeviceCreateBindGroupLayout(
+ RawId aSelfId, const dom::GPUBindGroupLayoutDescriptor& aDesc);
+ RawId DeviceCreatePipelineLayout(
+ RawId aSelfId, const dom::GPUPipelineLayoutDescriptor& aDesc);
+ RawId DeviceCreateBindGroup(RawId aSelfId,
+ const dom::GPUBindGroupDescriptor& aDesc);
+ RawId DeviceCreateShaderModule(RawId aSelfId,
+ const dom::GPUShaderModuleDescriptor& aDesc);
+ RawId DeviceCreateComputePipeline(
+ RawId aSelfId, const dom::GPUComputePipelineDescriptor& aDesc,
+ nsTArray<RawId>* const aImplicitBindGroupLayoutIds);
+ RawId DeviceCreateRenderPipeline(
+ RawId aSelfId, const dom::GPURenderPipelineDescriptor& aDesc,
+ nsTArray<RawId>* const aImplicitBindGroupLayoutIds);
+
+ void DeviceCreateSwapChain(RawId aSelfId, const RGBDescriptor& aRgbDesc,
+ size_t maxBufferCount,
+ wr::ExternalImageId aExternalImageId);
+ void SwapChainPresent(wr::ExternalImageId aExternalImageId, RawId aTextureId);
+
+ void RegisterDevice(RawId aId, Device* aDevice);
+ void UnregisterDevice(RawId aId);
+
+ private:
+ virtual ~WebGPUChild();
+
+ // AddIPDLReference and ReleaseIPDLReference are only to be called by
+ // CompositorBridgeChild's AllocPWebGPUChild and DeallocPWebGPUChild methods
+ // respectively. We intentionally make them private to prevent misuse.
+ // The purpose of these methods is to be aware of when the IPC system around
+ // this actor goes down: mIPCOpen is then set to false.
+ void AddIPDLReference() {
+ MOZ_ASSERT(!mIPCOpen);
+ mIPCOpen = true;
+ AddRef();
+ }
+ void ReleaseIPDLReference() {
+ MOZ_ASSERT(mIPCOpen);
+ mIPCOpen = false;
+ Release();
+ }
+
+ ffi::WGPUClient* const mClient;
+ bool mIPCOpen;
+ std::unordered_map<RawId, Device*> mDeviceMap;
+
+ public:
+ ipc::IPCResult RecvError(RawId aDeviceId, const nsACString& aMessage);
+ ipc::IPCResult RecvDropAction(const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvFreeAdapter(RawId id);
+ ipc::IPCResult RecvFreeDevice(RawId id);
+ ipc::IPCResult RecvFreePipelineLayout(RawId id);
+ ipc::IPCResult RecvFreeShaderModule(RawId id);
+ ipc::IPCResult RecvFreeBindGroupLayout(RawId id);
+ ipc::IPCResult RecvFreeBindGroup(RawId id);
+ ipc::IPCResult RecvFreeCommandBuffer(RawId id);
+ ipc::IPCResult RecvFreeRenderPipeline(RawId id);
+ ipc::IPCResult RecvFreeComputePipeline(RawId id);
+ ipc::IPCResult RecvFreeBuffer(RawId id);
+ ipc::IPCResult RecvFreeTexture(RawId id);
+ ipc::IPCResult RecvFreeTextureView(RawId id);
+ ipc::IPCResult RecvFreeSampler(RawId id);
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // WEBGPU_CHILD_H_
diff --git a/dom/webgpu/ipc/WebGPUParent.cpp b/dom/webgpu/ipc/WebGPUParent.cpp
new file mode 100644
index 0000000000..5728d7c242
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUParent.cpp
@@ -0,0 +1,713 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebGPUParent.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "mozilla/layers/ImageDataSerializer.h"
+#include "mozilla/layers/TextureHost.h"
+
+namespace mozilla {
+namespace webgpu {
+
+const uint64_t POLL_TIME_MS = 100;
+
+// A helper class to force error checks coming across FFI.
+// It will assert in destructor if unchecked.
+// TODO: refactor this to avoid stack-allocating the buffer all the time.
+class ErrorBuffer {
+ // if the message doesn't fit, it will be truncated
+ static constexpr unsigned BUFFER_SIZE = 256;
+ char mUtf8[BUFFER_SIZE] = {};
+ bool mGuard = false;
+
+ public:
+ ErrorBuffer() { mUtf8[0] = 0; }
+ ErrorBuffer(const ErrorBuffer&) = delete;
+ ~ErrorBuffer() { MOZ_ASSERT(!mGuard); }
+
+ ffi::WGPUErrorBuffer ToFFI() {
+ mGuard = true;
+ ffi::WGPUErrorBuffer errorBuf = {mUtf8, BUFFER_SIZE};
+ return errorBuf;
+ }
+
+ bool CheckAndForward(PWebGPUParent* aParent, RawId aDeviceId) {
+ mGuard = false;
+ if (!mUtf8[0]) {
+ return false;
+ }
+ nsAutoCString cString(mUtf8);
+ if (!aParent->SendError(aDeviceId, cString)) {
+ NS_ERROR("Unable to SendError");
+ }
+ return true;
+ }
+};
+
+class PresentationData {
+ NS_INLINE_DECL_REFCOUNTING(PresentationData);
+
+ public:
+ RawId mDeviceId = 0;
+ RawId mQueueId = 0;
+ RefPtr<layers::MemoryTextureHost> mTextureHost;
+ uint32_t mSourcePitch = 0;
+ uint32_t mTargetPitch = 0;
+ uint32_t mRowCount = 0;
+ std::vector<RawId> mUnassignedBufferIds;
+ std::vector<RawId> mAvailableBufferIds;
+ std::vector<RawId> mQueuedBufferIds;
+ Mutex mBuffersLock;
+
+ PresentationData() : mBuffersLock("WebGPU presentation buffers") {
+ MOZ_COUNT_CTOR(PresentationData);
+ }
+
+ private:
+ ~PresentationData() { MOZ_COUNT_DTOR(PresentationData); }
+};
+
+static void FreeAdapter(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeAdapter(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeDevice(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeDevice(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeSwapChain(RawId id, void* param) {
+ Unused << id;
+ Unused << param;
+}
+static void FreePipelineLayout(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreePipelineLayout(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeShaderModule(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeShaderModule(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeBindGroupLayout(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeBindGroupLayout(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeBindGroup(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeBindGroup(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeCommandBuffer(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeCommandBuffer(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeRenderPipeline(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeRenderPipeline(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeComputePipeline(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeComputePipeline(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeBuffer(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeBuffer(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeTexture(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeTexture(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeTextureView(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeTextureView(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeSampler(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeSampler(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeSurface(RawId id, void* param) {
+ Unused << id;
+ Unused << param;
+}
+
+static ffi::WGPUIdentityRecyclerFactory MakeFactory(void* param) {
+ ffi::WGPUIdentityRecyclerFactory factory = {param};
+ factory.free_adapter = FreeAdapter;
+ factory.free_device = FreeDevice;
+ factory.free_swap_chain = FreeSwapChain;
+ factory.free_pipeline_layout = FreePipelineLayout;
+ factory.free_shader_module = FreeShaderModule;
+ factory.free_bind_group_layout = FreeBindGroupLayout;
+ factory.free_bind_group = FreeBindGroup;
+ factory.free_command_buffer = FreeCommandBuffer;
+ factory.free_render_pipeline = FreeRenderPipeline;
+ factory.free_compute_pipeline = FreeComputePipeline;
+ factory.free_buffer = FreeBuffer;
+ factory.free_texture = FreeTexture;
+ factory.free_texture_view = FreeTextureView;
+ factory.free_sampler = FreeSampler;
+ factory.free_surface = FreeSurface;
+ return factory;
+}
+
+WebGPUParent::WebGPUParent()
+ : mContext(ffi::wgpu_server_new(MakeFactory(this))) {
+ mTimer.Start(base::TimeDelta::FromMilliseconds(POLL_TIME_MS), this,
+ &WebGPUParent::MaintainDevices);
+}
+
+WebGPUParent::~WebGPUParent() = default;
+
+void WebGPUParent::MaintainDevices() {
+ ffi::wgpu_server_poll_all_devices(mContext, false);
+}
+
+ipc::IPCResult WebGPUParent::RecvInstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions,
+ const nsTArray<RawId>& aTargetIds,
+ InstanceRequestAdapterResolver&& resolver) {
+ ffi::WGPURequestAdapterOptions options = {};
+ if (aOptions.mPowerPreference.WasPassed()) {
+ options.power_preference = static_cast<ffi::WGPUPowerPreference>(
+ aOptions.mPowerPreference.Value());
+ }
+ // TODO: make available backends configurable by prefs
+
+ ErrorBuffer error;
+ int8_t index = ffi::wgpu_server_instance_request_adapter(
+ mContext, &options, aTargetIds.Elements(), aTargetIds.Length(),
+ error.ToFFI());
+ if (index >= 0) {
+ resolver(aTargetIds[index]);
+ } else {
+ resolver(0);
+ }
+ error.CheckAndForward(this, 0);
+
+ // free the unused IDs
+ for (size_t i = 0; i < aTargetIds.Length(); ++i) {
+ if (static_cast<int8_t>(i) != index && !SendFreeAdapter(aTargetIds[i])) {
+ NS_ERROR("Unable to SendFreeAdapter");
+ }
+ }
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice(
+ RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc, RawId aNewId) {
+ ffi::WGPUDeviceDescriptor desc = {};
+ desc.shader_validation = true; // required for implicit pipeline layouts
+
+ if (aDesc.mLimits.WasPassed()) {
+ const auto& lim = aDesc.mLimits.Value();
+ desc.limits.max_bind_groups = lim.mMaxBindGroups;
+ desc.limits.max_dynamic_uniform_buffers_per_pipeline_layout =
+ lim.mMaxDynamicUniformBuffersPerPipelineLayout;
+ desc.limits.max_dynamic_storage_buffers_per_pipeline_layout =
+ lim.mMaxDynamicStorageBuffersPerPipelineLayout;
+ desc.limits.max_sampled_textures_per_shader_stage =
+ lim.mMaxSampledTexturesPerShaderStage;
+ desc.limits.max_samplers_per_shader_stage = lim.mMaxSamplersPerShaderStage;
+ desc.limits.max_storage_buffers_per_shader_stage =
+ lim.mMaxStorageBuffersPerShaderStage;
+ desc.limits.max_storage_textures_per_shader_stage =
+ lim.mMaxStorageTexturesPerShaderStage;
+ desc.limits.max_uniform_buffers_per_shader_stage =
+ lim.mMaxUniformBuffersPerShaderStage;
+ desc.limits.max_uniform_buffer_binding_size =
+ lim.mMaxUniformBufferBindingSize;
+ } else {
+ ffi::wgpu_server_fill_default_limits(&desc.limits);
+ }
+
+ ErrorBuffer error;
+ ffi::wgpu_server_adapter_request_device(mContext, aSelfId, &desc, aNewId,
+ error.ToFFI());
+ error.CheckAndForward(this, 0);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvAdapterDestroy(RawId aSelfId) {
+ ffi::wgpu_server_adapter_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceDestroy(RawId aSelfId) {
+ ffi::wgpu_server_device_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBufferReturnShmem(RawId aSelfId,
+ Shmem&& aShmem) {
+ mSharedMemoryMap[aSelfId] = aShmem;
+ return IPC_OK();
+}
+
+struct MapRequest {
+ const ffi::WGPUGlobal* const mContext;
+ ffi::WGPUBufferId mBufferId;
+ ffi::WGPUHostMap mHostMap;
+ uint64_t mOffset;
+ ipc::Shmem mShmem;
+ WebGPUParent::BufferMapResolver mResolver;
+ MapRequest(const ffi::WGPUGlobal* context, ffi::WGPUBufferId bufferId,
+ ffi::WGPUHostMap hostMap, uint64_t offset, ipc::Shmem&& shmem,
+ WebGPUParent::BufferMapResolver&& resolver)
+ : mContext(context),
+ mBufferId(bufferId),
+ mHostMap(hostMap),
+ mOffset(offset),
+ mShmem(shmem),
+ mResolver(resolver) {}
+};
+
+static void MapCallback(ffi::WGPUBufferMapAsyncStatus status,
+ uint8_t* userdata) {
+ auto* req = reinterpret_cast<MapRequest*>(userdata);
+ // TODO: better handle errors
+ MOZ_ASSERT(status == ffi::WGPUBufferMapAsyncStatus_Success);
+ if (req->mHostMap == ffi::WGPUHostMap_Read) {
+ const uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
+ req->mContext, req->mBufferId, req->mOffset,
+ req->mShmem.Size<uint8_t>());
+ memcpy(req->mShmem.get<uint8_t>(), ptr, req->mShmem.Size<uint8_t>());
+ }
+ req->mResolver(std::move(req->mShmem));
+ delete req;
+}
+
+ipc::IPCResult WebGPUParent::RecvBufferMap(RawId aSelfId,
+ ffi::WGPUHostMap aHostMap,
+ uint64_t aOffset, uint64_t aSize,
+ BufferMapResolver&& aResolver) {
+ auto* request = new MapRequest(mContext, aSelfId, aHostMap, aOffset,
+ std::move(mSharedMemoryMap[aSelfId]),
+ std::move(aResolver));
+ ffi::WGPUBufferMapOperation mapOperation = {
+ aHostMap, &MapCallback, reinterpret_cast<uint8_t*>(request)};
+ ffi::wgpu_server_buffer_map(mContext, aSelfId, aOffset, aSize, mapOperation);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem,
+ bool aFlush) {
+ if (aFlush) {
+ // TODO: flush exact modified sub-range
+ uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
+ mContext, aSelfId, 0, aShmem.Size<uint8_t>());
+ MOZ_ASSERT(ptr != nullptr);
+ memcpy(ptr, aShmem.get<uint8_t>(), aShmem.Size<uint8_t>());
+ }
+
+ ffi::wgpu_server_buffer_unmap(mContext, aSelfId);
+
+ const auto iter = mSharedMemoryMap.find(aSelfId);
+ if (iter == mSharedMemoryMap.end()) {
+ DeallocShmem(aShmem);
+ } else {
+ iter->second = aShmem;
+ }
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aSelfId) {
+ ffi::wgpu_server_buffer_drop(mContext, aSelfId);
+
+ const auto iter = mSharedMemoryMap.find(aSelfId);
+ if (iter != mSharedMemoryMap.end()) {
+ DeallocShmem(iter->second);
+ mSharedMemoryMap.erase(iter);
+ }
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvTextureDestroy(RawId aSelfId) {
+ ffi::wgpu_server_texture_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvTextureViewDestroy(RawId aSelfId) {
+ ffi::wgpu_server_texture_view_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvSamplerDestroy(RawId aSelfId) {
+ ffi::wgpu_server_sampler_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandEncoderFinish(
+ RawId aSelfId, RawId aDeviceId,
+ const dom::GPUCommandBufferDescriptor& aDesc) {
+ Unused << aDesc;
+ ffi::WGPUCommandBufferDescriptor desc = {};
+ ErrorBuffer error;
+ ffi::wgpu_server_encoder_finish(mContext, aSelfId, &desc, error.ToFFI());
+
+ error.CheckAndForward(this, aDeviceId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandEncoderDestroy(RawId aSelfId) {
+ ffi::wgpu_server_encoder_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandBufferDestroy(RawId aSelfId) {
+ ffi::wgpu_server_command_buffer_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvQueueSubmit(
+ RawId aSelfId, const nsTArray<RawId>& aCommandBuffers) {
+ ffi::wgpu_server_queue_submit(mContext, aSelfId, aCommandBuffers.Elements(),
+ aCommandBuffers.Length());
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvQueueWriteBuffer(RawId aSelfId,
+ RawId aBufferId,
+ uint64_t aBufferOffset,
+ Shmem&& aShmem) {
+ ffi::wgpu_server_queue_write_buffer(mContext, aSelfId, aBufferId,
+ aBufferOffset, aShmem.get<uint8_t>(),
+ aShmem.Size<uint8_t>());
+ DeallocShmem(aShmem);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvQueueWriteTexture(
+ RawId aSelfId, const ffi::WGPUTextureCopyView& aDestination, Shmem&& aShmem,
+ const ffi::WGPUTextureDataLayout& aDataLayout,
+ const ffi::WGPUExtent3d& aExtent) {
+ ffi::wgpu_server_queue_write_texture(
+ mContext, aSelfId, &aDestination, aShmem.get<uint8_t>(),
+ aShmem.Size<uint8_t>(), &aDataLayout, &aExtent);
+ DeallocShmem(aShmem);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBindGroupLayoutDestroy(RawId aSelfId) {
+ ffi::wgpu_server_bind_group_layout_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvPipelineLayoutDestroy(RawId aSelfId) {
+ ffi::wgpu_server_pipeline_layout_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBindGroupDestroy(RawId aSelfId) {
+ ffi::wgpu_server_bind_group_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvShaderModuleDestroy(RawId aSelfId) {
+ ffi::wgpu_server_shader_module_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvComputePipelineDestroy(RawId aSelfId) {
+ ffi::wgpu_server_compute_pipeline_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvRenderPipelineDestroy(RawId aSelfId) {
+ ffi::wgpu_server_render_pipeline_drop(mContext, aSelfId);
+ return IPC_OK();
+}
+
+// TODO: proper destruction
+static const uint64_t kBufferAlignment = 0x100;
+
+static uint64_t Align(uint64_t value) {
+ return (value | (kBufferAlignment - 1)) + 1;
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceCreateSwapChain(
+ RawId aSelfId, RawId aQueueId, const RGBDescriptor& aDesc,
+ const nsTArray<RawId>& aBufferIds, ExternalImageId aExternalId) {
+ const auto rows = aDesc.size().height;
+ const auto bufferStride =
+ Align(static_cast<uint64_t>(aDesc.size().width) * 4);
+ const auto textureStride = layers::ImageDataSerializer::GetRGBStride(aDesc);
+ const auto wholeBufferSize = CheckedInt<size_t>(textureStride) * rows;
+ if (!wholeBufferSize.isValid()) {
+ NS_ERROR("Invalid total buffer size!");
+ return IPC_OK();
+ }
+ auto* textureHostData = new (fallible) uint8_t[wholeBufferSize.value()];
+ if (!textureHostData) {
+ NS_ERROR("Unable to allocate host data!");
+ return IPC_OK();
+ }
+ RefPtr<layers::MemoryTextureHost> textureHost = new layers::MemoryTextureHost(
+ textureHostData, aDesc, layers::TextureFlags::NO_FLAGS);
+ textureHost->DisableExternalTextures();
+ textureHost->CreateRenderTexture(aExternalId);
+ nsTArray<RawId> bufferIds(aBufferIds.Clone());
+ RefPtr<PresentationData> data = new PresentationData();
+ data->mDeviceId = aSelfId;
+ data->mQueueId = aQueueId;
+ data->mTextureHost = textureHost;
+ data->mSourcePitch = bufferStride;
+ data->mTargetPitch = textureStride;
+ data->mRowCount = rows;
+ for (const RawId id : bufferIds) {
+ data->mUnassignedBufferIds.push_back(id);
+ }
+ if (!mCanvasMap.insert({AsUint64(aExternalId), data}).second) {
+ NS_ERROR("External image is already registered as WebGPU canvas!");
+ }
+ return IPC_OK();
+}
+
+struct PresentRequest {
+ const ffi::WGPUGlobal* mContext;
+ RefPtr<PresentationData> mData;
+};
+
+static void PresentCallback(ffi::WGPUBufferMapAsyncStatus status,
+ uint8_t* userdata) {
+ auto* req = reinterpret_cast<PresentRequest*>(userdata);
+ PresentationData* data = req->mData.get();
+ // get the buffer ID
+ data->mBuffersLock.Lock();
+ RawId bufferId = data->mQueuedBufferIds.back();
+ data->mQueuedBufferIds.pop_back();
+ data->mAvailableBufferIds.push_back(bufferId);
+ data->mBuffersLock.Unlock();
+ // copy the data
+ if (status == ffi::WGPUBufferMapAsyncStatus_Success) {
+ const auto bufferSize = data->mRowCount * data->mSourcePitch;
+ const uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
+ req->mContext, bufferId, 0, bufferSize);
+ uint8_t* dst = data->mTextureHost->GetBuffer();
+ for (uint32_t row = 0; row < data->mRowCount; ++row) {
+ memcpy(dst, ptr, data->mTargetPitch);
+ dst += data->mTargetPitch;
+ ptr += data->mSourcePitch;
+ }
+ wgpu_server_buffer_unmap(req->mContext, bufferId);
+ } else {
+ // TODO: better handle errors
+ NS_WARNING("WebGPU frame mapping failed!");
+ }
+ // free yourself
+ delete req;
+}
+
+ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
+ wr::ExternalImageId aExternalId, RawId aTextureId,
+ RawId aCommandEncoderId) {
+ // step 0: get the data associated with the swapchain
+ const auto& lookup = mCanvasMap.find(AsUint64(aExternalId));
+ if (lookup == mCanvasMap.end()) {
+ NS_WARNING("WebGPU presenting on a destroyed swap chain!");
+ return IPC_OK();
+ }
+ RefPtr<PresentationData> data = lookup->second.get();
+ RawId bufferId = 0;
+ const auto& size = data->mTextureHost->GetSize();
+ const auto bufferSize = data->mRowCount * data->mSourcePitch;
+
+ // step 1: find an available staging buffer, or create one
+ data->mBuffersLock.Lock();
+ if (!data->mAvailableBufferIds.empty()) {
+ bufferId = data->mAvailableBufferIds.back();
+ data->mAvailableBufferIds.pop_back();
+ } else if (!data->mUnassignedBufferIds.empty()) {
+ bufferId = data->mUnassignedBufferIds.back();
+ data->mUnassignedBufferIds.pop_back();
+
+ ffi::WGPUBufferUsage usage =
+ WGPUBufferUsage_COPY_DST | WGPUBufferUsage_MAP_READ;
+ ffi::WGPUBufferDescriptor desc = {};
+ desc.size = bufferSize;
+ desc.usage = usage;
+
+ ErrorBuffer error;
+ ffi::wgpu_server_device_create_buffer(mContext, data->mDeviceId, &desc,
+ bufferId, error.ToFFI());
+ if (error.CheckAndForward(this, data->mDeviceId)) {
+ return IPC_OK();
+ }
+ } else {
+ bufferId = 0;
+ }
+ if (bufferId) {
+ data->mQueuedBufferIds.insert(data->mQueuedBufferIds.begin(), bufferId);
+ }
+ data->mBuffersLock.Unlock();
+ if (!bufferId) {
+    // TODO: add a warning - no buffers are available!
+ return IPC_OK();
+ }
+
+ // step 3: submit a copy command for the frame
+ ffi::WGPUCommandEncoderDescriptor encoderDesc = {};
+ {
+ ErrorBuffer error;
+ ffi::wgpu_server_device_create_encoder(mContext, data->mDeviceId,
+ &encoderDesc, aCommandEncoderId,
+ error.ToFFI());
+ if (error.CheckAndForward(this, data->mDeviceId)) {
+ return IPC_OK();
+ }
+ }
+
+ const ffi::WGPUTextureCopyView texView = {
+ aTextureId,
+ };
+ const ffi::WGPUTextureDataLayout bufLayout = {
+ 0,
+ data->mSourcePitch,
+ 0,
+ };
+ const ffi::WGPUBufferCopyView bufView = {
+ bufferId,
+ bufLayout,
+ };
+ const ffi::WGPUExtent3d extent = {
+ static_cast<uint32_t>(size.width),
+ static_cast<uint32_t>(size.height),
+ 1,
+ };
+ ffi::wgpu_server_encoder_copy_texture_to_buffer(mContext, aCommandEncoderId,
+ &texView, &bufView, &extent);
+ ffi::WGPUCommandBufferDescriptor commandDesc = {};
+ {
+ ErrorBuffer error;
+ ffi::wgpu_server_encoder_finish(mContext, aCommandEncoderId, &commandDesc,
+ error.ToFFI());
+ if (error.CheckAndForward(this, data->mDeviceId)) {
+ return IPC_OK();
+ }
+ }
+
+ ffi::wgpu_server_queue_submit(mContext, data->mQueueId, &aCommandEncoderId,
+ 1);
+
+ // step 4: request the pixels to be copied into the external texture
+  // TODO: this isn't strictly necessary. When WR wants to Lock() the
+  // external texture, we can just give it the contents of the last
+  // mapped buffer instead of the copy performed row-by-row in
+  // PresentCallback.
+ auto* const presentRequest = new PresentRequest{
+ mContext,
+ data,
+ };
+
+ ffi::WGPUBufferMapOperation mapOperation = {
+ ffi::WGPUHostMap_Read, &PresentCallback,
+ reinterpret_cast<uint8_t*>(presentRequest)};
+ ffi::wgpu_server_buffer_map(mContext, bufferId, 0, bufferSize, mapOperation);
+
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvSwapChainDestroy(
+ wr::ExternalImageId aExternalId) {
+ const auto& lookup = mCanvasMap.find(AsUint64(aExternalId));
+ MOZ_ASSERT(lookup != mCanvasMap.end());
+ RefPtr<PresentationData> data = lookup->second.get();
+ mCanvasMap.erase(AsUint64(aExternalId));
+ data->mTextureHost = nullptr;
+ layers::TextureHost::DestroyRenderTexture(aExternalId);
+
+ data->mBuffersLock.Lock();
+ for (const auto bid : data->mUnassignedBufferIds) {
+ if (!SendFreeBuffer(bid)) {
+ NS_WARNING("Unable to free an ID for non-assigned buffer");
+ }
+ }
+ for (const auto bid : data->mAvailableBufferIds) {
+ ffi::wgpu_server_buffer_drop(mContext, bid);
+ }
+ for (const auto bid : data->mQueuedBufferIds) {
+ ffi::wgpu_server_buffer_drop(mContext, bid);
+ }
+ data->mBuffersLock.Unlock();
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvShutdown() {
+ mTimer.Stop();
+ for (const auto& p : mCanvasMap) {
+ const wr::ExternalImageId extId = {p.first};
+ layers::TextureHost::DestroyRenderTexture(extId);
+ }
+ mCanvasMap.clear();
+ ffi::wgpu_server_poll_all_devices(mContext, true);
+ ffi::wgpu_server_delete(const_cast<ffi::WGPUGlobal*>(mContext));
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceAction(RawId aSelf,
+ const ipc::ByteBuf& aByteBuf) {
+ ipc::ByteBuf byteBuf;
+ ErrorBuffer error;
+ ffi::wgpu_server_device_action(mContext, aSelf, ToFFI(&aByteBuf),
+ ToFFI(&byteBuf), error.ToFFI());
+
+ if (byteBuf.mData) {
+ if (!SendDropAction(std::move(byteBuf))) {
+ NS_WARNING("Unable to set a drop action!");
+ }
+ }
+
+ error.CheckAndForward(this, aSelf);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvTextureAction(RawId aSelf, RawId aDevice,
+ const ipc::ByteBuf& aByteBuf) {
+ ErrorBuffer error;
+ ffi::wgpu_server_texture_action(mContext, aSelf, ToFFI(&aByteBuf),
+ error.ToFFI());
+
+ error.CheckAndForward(this, aDevice);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandEncoderAction(
+ RawId aSelf, RawId aDevice, const ipc::ByteBuf& aByteBuf) {
+ ErrorBuffer error;
+ ffi::wgpu_server_command_encoder_action(mContext, aSelf, ToFFI(&aByteBuf),
+ error.ToFFI());
+ error.CheckAndForward(this, aDevice);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBumpImplicitBindGroupLayout(RawId aPipelineId,
+ bool aIsCompute,
+ uint32_t aIndex,
+ RawId aAssignId) {
+ ErrorBuffer error;
+ if (aIsCompute) {
+ ffi::wgpu_server_compute_pipeline_get_bind_group_layout(
+ mContext, aPipelineId, aIndex, aAssignId, error.ToFFI());
+ } else {
+ ffi::wgpu_server_render_pipeline_get_bind_group_layout(
+ mContext, aPipelineId, aIndex, aAssignId, error.ToFFI());
+ }
+
+ error.CheckAndForward(this, 0);
+ return IPC_OK();
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ipc/WebGPUParent.h b/dom/webgpu/ipc/WebGPUParent.h
new file mode 100644
index 0000000000..9df919d55b
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUParent.h
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_PARENT_H_
+#define WEBGPU_PARENT_H_
+
+#include "mozilla/webgpu/PWebGPUParent.h"
+#include "mozilla/webrender/WebRenderAPI.h"
+#include "WebGPUTypes.h"
+#include "base/timer.h"
+
+namespace mozilla {
+namespace webgpu {
+class PresentationData;
+
+class WebGPUParent final : public PWebGPUParent {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebGPUParent)
+
+ public:
+ explicit WebGPUParent();
+
+ ipc::IPCResult RecvInstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions,
+ const nsTArray<RawId>& aTargetIds,
+ InstanceRequestAdapterResolver&& resolver);
+ ipc::IPCResult RecvAdapterRequestDevice(RawId aSelfId,
+ const dom::GPUDeviceDescriptor& aDesc,
+ RawId aNewId);
+ ipc::IPCResult RecvAdapterDestroy(RawId aSelfId);
+ ipc::IPCResult RecvDeviceDestroy(RawId aSelfId);
+ ipc::IPCResult RecvBufferReturnShmem(RawId aSelfId, Shmem&& aShmem);
+ ipc::IPCResult RecvBufferMap(RawId aSelfId, ffi::WGPUHostMap aHostMap,
+ uint64_t aOffset, uint64_t size,
+ BufferMapResolver&& aResolver);
+ ipc::IPCResult RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem, bool aFlush);
+ ipc::IPCResult RecvBufferDestroy(RawId aSelfId);
+ ipc::IPCResult RecvTextureDestroy(RawId aSelfId);
+ ipc::IPCResult RecvTextureViewDestroy(RawId aSelfId);
+ ipc::IPCResult RecvSamplerDestroy(RawId aSelfId);
+ ipc::IPCResult RecvCommandEncoderFinish(
+ RawId aSelfId, RawId aDeviceId,
+ const dom::GPUCommandBufferDescriptor& aDesc);
+ ipc::IPCResult RecvCommandEncoderDestroy(RawId aSelfId);
+ ipc::IPCResult RecvCommandBufferDestroy(RawId aSelfId);
+ ipc::IPCResult RecvQueueSubmit(RawId aSelfId,
+ const nsTArray<RawId>& aCommandBuffers);
+ ipc::IPCResult RecvQueueWriteBuffer(RawId aSelfId, RawId aBufferId,
+ uint64_t aBufferOffset, Shmem&& aShmem);
+ ipc::IPCResult RecvQueueWriteTexture(
+ RawId aSelfId, const ffi::WGPUTextureCopyView& aDestination,
+ Shmem&& aShmem, const ffi::WGPUTextureDataLayout& aDataLayout,
+ const ffi::WGPUExtent3d& aExtent);
+ ipc::IPCResult RecvBindGroupLayoutDestroy(RawId aSelfId);
+ ipc::IPCResult RecvPipelineLayoutDestroy(RawId aSelfId);
+ ipc::IPCResult RecvBindGroupDestroy(RawId aSelfId);
+ ipc::IPCResult RecvShaderModuleDestroy(RawId aSelfId);
+ ipc::IPCResult RecvComputePipelineDestroy(RawId aSelfId);
+ ipc::IPCResult RecvRenderPipelineDestroy(RawId aSelfId);
+ ipc::IPCResult RecvDeviceCreateSwapChain(RawId aSelfId, RawId aQueueId,
+ const layers::RGBDescriptor& aDesc,
+ const nsTArray<RawId>& aBufferIds,
+ ExternalImageId aExternalId);
+ ipc::IPCResult RecvSwapChainPresent(wr::ExternalImageId aExternalId,
+ RawId aTextureId,
+ RawId aCommandEncoderId);
+ ipc::IPCResult RecvSwapChainDestroy(wr::ExternalImageId aExternalId);
+
+ ipc::IPCResult RecvDeviceAction(RawId aSelf, const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvTextureAction(RawId aSelf, RawId aDevice,
+ const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvCommandEncoderAction(RawId aSelf, RawId aDevice,
+ const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvBumpImplicitBindGroupLayout(RawId aPipelineId,
+ bool aIsCompute,
+ uint32_t aIndex,
+ RawId aAssignId);
+
+ ipc::IPCResult RecvShutdown();
+
+ private:
+ virtual ~WebGPUParent();
+ void MaintainDevices();
+
+ const ffi::WGPUGlobal* const mContext;
+ base::RepeatingTimer<WebGPUParent> mTimer;
+ /// Shmem associated with a mappable buffer has to be owned by one of the
+ /// processes. We keep it here for every mappable buffer while the buffer is
+ /// used by GPU.
+ std::unordered_map<uint64_t, Shmem> mSharedMemoryMap;
+ /// Associated presentation data for each swapchain.
+ std::unordered_map<uint64_t, RefPtr<PresentationData>> mCanvasMap;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // WEBGPU_PARENT_H_
diff --git a/dom/webgpu/ipc/WebGPUSerialize.h b/dom/webgpu/ipc/WebGPUSerialize.h
new file mode 100644
index 0000000000..ffaacc0405
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUSerialize.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_SERIALIZE_H_
+#define WEBGPU_SERIALIZE_H_
+
+#include "WebGPUTypes.h"
+#include "ipc/EnumSerializer.h"
+#include "ipc/IPCMessageUtils.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace IPC {
+
+#define DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, guard) \
+ template <> \
+ struct ParamTraits<something> \
+ : public ContiguousEnumSerializer<something, something(0), guard> {}
+
+#define DEFINE_IPC_SERIALIZER_DOM_ENUM(something) \
+ DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something::EndGuard_)
+#define DEFINE_IPC_SERIALIZER_FFI_ENUM(something) \
+ DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something##_Sentinel)
+
+DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::dom::GPUPowerPreference);
+
+DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUHostMap);
+
+DEFINE_IPC_SERIALIZER_WITHOUT_FIELDS(mozilla::dom::GPUCommandBufferDescriptor);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPURequestAdapterOptions,
+ mPowerPreference);
+DEFINE_IPC_SERIALIZER_WITHOUT_FIELDS(mozilla::dom::GPUExtensions);
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPULimits, mMaxBindGroups);
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUDeviceDescriptor,
+ mExtensions, mLimits);
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUExtent3d, width,
+ height, depth);
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUOrigin3d, x, y, z);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUTextureDataLayout,
+ offset, bytes_per_row, rows_per_image);
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUTextureCopyView,
+ texture, mip_level, origin);
+
+#undef DEFINE_IPC_SERIALIZER_FFI_ENUM
+#undef DEFINE_IPC_SERIALIZER_DOM_ENUM
+#undef DEFINE_IPC_SERIALIZER_ENUM_GUARD
+
+} // namespace IPC
+#endif // WEBGPU_SERIALIZE_H_
diff --git a/dom/webgpu/ipc/WebGPUTypes.h b/dom/webgpu/ipc/WebGPUTypes.h
new file mode 100644
index 0000000000..3e8e62afd4
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUTypes.h
@@ -0,0 +1,20 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_TYPES_H_
+#define WEBGPU_TYPES_H_
+
+#include <cstdint>
+
+namespace mozilla {
+namespace webgpu {
+
+typedef uint64_t RawId;
+typedef uint64_t BufferAddress;
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // WEBGPU_TYPES_H_
diff --git a/dom/webgpu/mochitest/mochitest-no-pref.ini b/dom/webgpu/mochitest/mochitest-no-pref.ini
new file mode 100644
index 0000000000..c1e2681367
--- /dev/null
+++ b/dom/webgpu/mochitest/mochitest-no-pref.ini
@@ -0,0 +1,4 @@
+[DEFAULT]
+subsuite = webgpu
+
+[test_disabled.html]
diff --git a/dom/webgpu/mochitest/mochitest.ini b/dom/webgpu/mochitest/mochitest.ini
new file mode 100644
index 0000000000..0a039426bd
--- /dev/null
+++ b/dom/webgpu/mochitest/mochitest.ini
@@ -0,0 +1,13 @@
+[DEFAULT]
+subsuite = webgpu
+run-if = nightly_build && webrender
+prefs =
+ dom.webgpu.enabled=true
+
+[test_enabled.html]
+[test_device_creation.html]
+[test_buffer_mapping.html]
+[test_command_buffer_creation.html]
+[test_submit_compute_empty.html]
+[test_submit_render_empty.html]
+[test_queue_write.html]
diff --git a/dom/webgpu/mochitest/test_buffer_mapping.html b/dom/webgpu/mochitest/test_buffer_mapping.html
new file mode 100644
index 0000000000..78e22470ee
--- /dev/null
+++ b/dom/webgpu/mochitest/test_buffer_mapping.html
@@ -0,0 +1,39 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<meta charset='utf-8'>
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<link rel="stylesheet" href="/tests/SimpleTest/test.css">
+</head>
+<body>
+<script>
+
+ok(SpecialPowers.getBoolPref('dom.webgpu.enabled'), 'Pref should be enabled.');
+
+const func = async function() {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+
+ const bufferRead = device.createBuffer({ size:4, usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST });
+ const bufferWrite = device.createBuffer({ size:4, usage: GPUBufferUsage.COPY_SRC, mappedAtCreation: true });
+ (new Float32Array(bufferWrite.getMappedRange())).set([1.0]);
+ bufferWrite.unmap();
+
+ const encoder = device.createCommandEncoder();
+ encoder.copyBufferToBuffer(bufferWrite, 0, bufferRead, 0, 4);
+ device.defaultQueue.submit([encoder.finish()]);
+
+ await bufferRead.mapAsync(GPUMapMode.READ);
+ const data = bufferRead.getMappedRange();
+ const value = (new Float32Array(data))[0];
+ bufferRead.unmap();
+
+ ok(value == 1.0, 'value == 1.0');
+};
+
+SimpleTest.waitForExplicitFinish();
+func().finally(() => SimpleTest.finish());
+
+</script>
+</body>
+</html>
diff --git a/dom/webgpu/mochitest/test_command_buffer_creation.html b/dom/webgpu/mochitest/test_command_buffer_creation.html
new file mode 100644
index 0000000000..31339824d5
--- /dev/null
+++ b/dom/webgpu/mochitest/test_command_buffer_creation.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<meta charset='utf-8'>
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<link rel="stylesheet" href="/tests/SimpleTest/test.css">
+</head>
+<body>
+<script>
+
+ok(SpecialPowers.getBoolPref('dom.webgpu.enabled'), 'Pref should be enabled.');
+
+const func = async function() {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+ const encoder = device.createCommandEncoder();
+ const command_buffer = encoder.finish();
+ ok(command_buffer !== undefined, 'command_buffer !== undefined');
+};
+
+SimpleTest.waitForExplicitFinish();
+func().finally(() => SimpleTest.finish());
+
+</script>
+</body>
+</html>
diff --git a/dom/webgpu/mochitest/test_device_creation.html b/dom/webgpu/mochitest/test_device_creation.html
new file mode 100644
index 0000000000..33f91b0811
--- /dev/null
+++ b/dom/webgpu/mochitest/test_device_creation.html
@@ -0,0 +1,24 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<meta charset='utf-8'>
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<link rel="stylesheet" href="/tests/SimpleTest/test.css">
+</head>
+<body>
+<script>
+
+ok(SpecialPowers.getBoolPref('dom.webgpu.enabled'), 'Pref should be enabled.');
+
+const func = async function() {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+ ok(device !== undefined, 'device !== undefined');
+};
+
+SimpleTest.waitForExplicitFinish();
+func().finally(() => SimpleTest.finish());
+
+</script>
+</body>
+</html>
diff --git a/dom/webgpu/mochitest/test_disabled.html b/dom/webgpu/mochitest/test_disabled.html
new file mode 100644
index 0000000000..e96b8d7ecf
--- /dev/null
+++ b/dom/webgpu/mochitest/test_disabled.html
@@ -0,0 +1,16 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<meta charset='utf-8'>
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<link rel="stylesheet" href="/tests/SimpleTest/test.css">
+</head>
+<body>
+<script>
+
+ok(!SpecialPowers.getBoolPref('dom.webgpu.enabled'), 'Pref should be disabled.');
+ok(navigator.gpu === undefined, 'navigator.gpu === undefined');
+
+</script>
+</body>
+</html>
diff --git a/dom/webgpu/mochitest/test_enabled.html b/dom/webgpu/mochitest/test_enabled.html
new file mode 100644
index 0000000000..3f4af2177b
--- /dev/null
+++ b/dom/webgpu/mochitest/test_enabled.html
@@ -0,0 +1,16 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<meta charset='utf-8'>
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<link rel="stylesheet" href="/tests/SimpleTest/test.css">
+</head>
+<body>
+<script>
+
+ok(SpecialPowers.getBoolPref('dom.webgpu.enabled'), 'Pref should be enabled.');
+ok(navigator.gpu !== undefined, 'navigator.gpu !== undefined');
+
+</script>
+</body>
+</html>
diff --git a/dom/webgpu/mochitest/test_queue_write.html b/dom/webgpu/mochitest/test_queue_write.html
new file mode 100644
index 0000000000..527cdf580f
--- /dev/null
+++ b/dom/webgpu/mochitest/test_queue_write.html
@@ -0,0 +1,31 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<meta charset='utf-8'>
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<link rel="stylesheet" href="/tests/SimpleTest/test.css">
+</head>
+<body>
+<script>
+
+ok(SpecialPowers.getBoolPref('dom.webgpu.enabled'), 'Pref should be enabled.');
+
+const func = async function() {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+ const buffer = device.createBuffer({size:16, usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC | GPUBufferUsage.VERTEX});
+ const arrayBuf = new ArrayBuffer(16);
+ (new Int32Array(arrayBuf)).fill(5)
+ device.defaultQueue.writeBuffer(buffer, 0, arrayBuf, 0);
+ const texture = device.createTexture({size: [2,2,1], dimension: "2d", format: "rgba8unorm", usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC });
+ device.defaultQueue.writeTexture({ texture }, arrayBuf, { bytesPerRow:8 }, [2,2,1]);
+ // this isn't a proper check yet; we need to read back the contents and verify the writes happened
+ ok(device !== undefined, '');
+};
+
+SimpleTest.waitForExplicitFinish();
+func().finally(() => SimpleTest.finish());
+
+</script>
+</body>
+</html>
diff --git a/dom/webgpu/mochitest/test_submit_compute_empty.html b/dom/webgpu/mochitest/test_submit_compute_empty.html
new file mode 100644
index 0000000000..bded945528
--- /dev/null
+++ b/dom/webgpu/mochitest/test_submit_compute_empty.html
@@ -0,0 +1,29 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<meta charset='utf-8'>
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<link rel="stylesheet" href="/tests/SimpleTest/test.css">
+</head>
+<body>
+<script>
+
+ok(SpecialPowers.getBoolPref('dom.webgpu.enabled'), 'Pref should be enabled.');
+
+const func = async function() {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.endPass();
+ const command_buffer = encoder.finish();
+ device.defaultQueue.submit([command_buffer]);
+ ok(command_buffer !== undefined, 'command_buffer !== undefined');
+};
+
+SimpleTest.waitForExplicitFinish();
+func().finally(() => SimpleTest.finish());
+
+</script>
+</body>
+</html>
diff --git a/dom/webgpu/mochitest/test_submit_render_empty.html b/dom/webgpu/mochitest/test_submit_render_empty.html
new file mode 100644
index 0000000000..946afd8a88
--- /dev/null
+++ b/dom/webgpu/mochitest/test_submit_render_empty.html
@@ -0,0 +1,43 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<meta charset='utf-8'>
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<link rel="stylesheet" href="/tests/SimpleTest/test.css">
+</head>
+<body>
+<script>
+
+ok(SpecialPowers.getBoolPref('dom.webgpu.enabled'), 'Pref should be enabled.');
+
+const func = async function() {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+
+ const texture = device.createTexture({
+ size: { width: 100, height: 100, depth: 1 },
+ format: "rgba8unorm",
+ usage: GPUTextureUsage.OUTPUT_ATTACHMENT,
+ });
+ const view = texture.createView();
+
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [{
+ attachment: view,
+ loadValue: { r: 0, g: 0, b: 0, a: 0 },
+ storeOp: "store",
+ }],
+ });
+ pass.endPass();
+ const command_buffer = encoder.finish();
+ device.defaultQueue.submit([command_buffer]);
+ ok(command_buffer !== undefined, 'command_buffer !== undefined');
+};
+
+SimpleTest.waitForExplicitFinish();
+func().finally(() => SimpleTest.finish());
+
+</script>
+</body>
+</html>
diff --git a/dom/webgpu/moz.build b/dom/webgpu/moz.build
new file mode 100644
index 0000000000..17631f87db
--- /dev/null
+++ b/dom/webgpu/moz.build
@@ -0,0 +1,70 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("**"):
+ BUG_COMPONENT = ("Core", "Graphics: WebGPU")
+
+MOCHITEST_MANIFESTS += [
+ "mochitest/mochitest-no-pref.ini",
+ "mochitest/mochitest.ini",
+]
+
+DIRS += []
+
+h_and_cpp = [
+ "Adapter",
+ "BindGroup",
+ "BindGroupLayout",
+ "Buffer",
+ "CanvasContext",
+ "CommandBuffer",
+ "CommandEncoder",
+ "ComputePassEncoder",
+ "ComputePipeline",
+ "Device",
+ "DeviceLostInfo",
+ "Fence",
+ "Instance",
+ "ObjectModel",
+ "OutOfMemoryError",
+ "PipelineLayout",
+ "Queue",
+ "RenderBundle",
+ "RenderBundleEncoder",
+ "RenderPassEncoder",
+ "RenderPipeline",
+ "Sampler",
+ "ShaderModule",
+ "SwapChain",
+ "Texture",
+ "TextureView",
+ "ValidationError",
+]
+EXPORTS.mozilla.webgpu += [x + ".h" for x in h_and_cpp]
+UNIFIED_SOURCES += [x + ".cpp" for x in h_and_cpp]
+
+IPDL_SOURCES += [
+ "ipc/PWebGPU.ipdl",
+]
+
+EXPORTS.mozilla.webgpu += [
+ "ipc/WebGPUChild.h",
+ "ipc/WebGPUParent.h",
+ "ipc/WebGPUSerialize.h",
+ "ipc/WebGPUTypes.h",
+]
+
+UNIFIED_SOURCES += [
+ "ipc/WebGPUChild.cpp",
+ "ipc/WebGPUParent.cpp",
+]
+
+if CONFIG["CC_TYPE"] in ("clang", "clang-cl"):
+ CXXFLAGS += ["-Werror=implicit-int-conversion"]
+
+include("/ipc/chromium/chromium-config.mozbuild")
+
+FINAL_LIBRARY = "xul"