author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:14:29 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:14:29 +0000
commit    fbaf0bb26397aa498eb9156f06d5a6fe34dd7dd8 (patch)
tree      4c1ccaf5486d4f2009f9a338a98a83e886e29c97 /dom/webgpu
parent    Releasing progress-linux version 124.0.1-1~progress7.99u1. (diff)
Merging upstream version 125.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'dom/webgpu')
-rw-r--r--  dom/webgpu/Adapter.cpp             | 13
-rw-r--r--  dom/webgpu/CanvasContext.h         |  2
-rw-r--r--  dom/webgpu/CommandBuffer.cpp       | 17
-rw-r--r--  dom/webgpu/CommandBuffer.h         |  4
-rw-r--r--  dom/webgpu/CommandEncoder.cpp      | 37
-rw-r--r--  dom/webgpu/CommandEncoder.h        | 10
-rw-r--r--  dom/webgpu/ComputePassEncoder.cpp  | 26
-rw-r--r--  dom/webgpu/ComputePassEncoder.h    |  7
-rw-r--r--  dom/webgpu/Device.cpp              | 52
-rw-r--r--  dom/webgpu/Queue.cpp               | 34
-rw-r--r--  dom/webgpu/RenderPassEncoder.cpp   | 66
-rw-r--r--  dom/webgpu/RenderPassEncoder.h     |  6
-rw-r--r--  dom/webgpu/SupportedFeatures.cpp   |  3
-rw-r--r--  dom/webgpu/Utility.cpp             |  2
-rw-r--r--  dom/webgpu/ipc/PWebGPU.ipdl        |  2
-rw-r--r--  dom/webgpu/ipc/WebGPUChild.cpp     |  4
-rw-r--r--  dom/webgpu/ipc/WebGPUParent.cpp    | 82
-rw-r--r--  dom/webgpu/ipc/WebGPUParent.h      |  6
-rw-r--r--  dom/webgpu/ipc/WebGPUSerialize.h   |  5
19 files changed, 228 insertions, 150 deletions
diff --git a/dom/webgpu/Adapter.cpp b/dom/webgpu/Adapter.cpp
index 434ba7c6fa..7c0e72eac4 100644
--- a/dom/webgpu/Adapter.cpp
+++ b/dom/webgpu/Adapter.cpp
@@ -128,9 +128,6 @@ static Maybe<ffi::WGPUFeatures> ToWGPUFeatures(
case dom::GPUFeatureName::Float32_filterable:
return Some(WGPUFeatures_FLOAT32_FILTERABLE);
-
- case dom::GPUFeatureName::EndGuard_:
- break;
}
MOZ_CRASH("Bad GPUFeatureName.");
}
@@ -141,11 +138,11 @@ static Maybe<ffi::WGPUFeatures> MakeFeatureBits(
for (const auto& feature : aFeatures) {
const auto bit = ToWGPUFeatures(feature);
if (!bit) {
- const auto featureStr = dom::GPUFeatureNameValues::GetString(feature);
+ const auto featureStr = dom::GetEnumString(feature);
(void)featureStr;
NS_WARNING(
nsPrintfCString("Requested feature bit for '%s' is not implemented.",
- featureStr.data())
+ featureStr.get())
.get());
return Nothing();
}
@@ -169,7 +166,7 @@ Adapter::Adapter(Instance* const aParent, WebGPUChild* const aBridge,
auto ret = std::unordered_map<ffi::WGPUFeatures, dom::GPUFeatureName>{};
for (const auto feature :
- MakeEnumeratedRange(dom::GPUFeatureName::EndGuard_)) {
+ dom::MakeWebIDLEnumeratedRange<dom::GPUFeatureName>()) {
const auto bitForFeature = ToWGPUFeatures(feature);
if (!bitForFeature) {
// There are some features that don't have bits.
@@ -363,12 +360,12 @@ already_AddRefed<dom::Promise> Adapter::RequestDevice(
for (const auto requested : aDesc.mRequiredFeatures) {
const bool supported = mFeatures->Features().count(requested);
if (!supported) {
- const auto fstr = dom::GPUFeatureNameValues::GetString(requested);
+ const auto fstr = dom::GetEnumString(requested);
const auto astr = this->LabelOrId();
nsPrintfCString msg(
"requestDevice: Feature '%s' requested must be supported by "
"adapter %s",
- fstr.data(), astr.get());
+ fstr.get(), astr.get());
promise->MaybeRejectWithTypeError(msg);
return;
}
diff --git a/dom/webgpu/CanvasContext.h b/dom/webgpu/CanvasContext.h
index 58ef04e861..057287b26e 100644
--- a/dom/webgpu/CanvasContext.h
+++ b/dom/webgpu/CanvasContext.h
@@ -84,6 +84,8 @@ class CanvasContext final : public nsICanvasRenderingContextInternal,
already_AddRefed<layers::FwdTransactionTracker> UseCompositableForwarder(
layers::CompositableForwarder* aForwarder) override;
+ bool IsOffscreenCanvas() { return !!mOffscreenCanvas; }
+
public:
void GetCanvas(dom::OwningHTMLCanvasElementOrOffscreenCanvas&) const;
diff --git a/dom/webgpu/CommandBuffer.cpp b/dom/webgpu/CommandBuffer.cpp
index ff9bbd8d5d..59023018ef 100644
--- a/dom/webgpu/CommandBuffer.cpp
+++ b/dom/webgpu/CommandBuffer.cpp
@@ -16,10 +16,13 @@ namespace mozilla::webgpu {
GPU_IMPL_CYCLE_COLLECTION(CommandBuffer, mParent)
GPU_IMPL_JS_WRAP(CommandBuffer)
-CommandBuffer::CommandBuffer(Device* const aParent, RawId aId,
- nsTArray<WeakPtr<CanvasContext>>&& aTargetContexts,
- RefPtr<CommandEncoder>&& aEncoder)
- : ChildOf(aParent), mId(aId), mTargetContexts(std::move(aTargetContexts)) {
+CommandBuffer::CommandBuffer(
+ Device* const aParent, RawId aId,
+ nsTArray<WeakPtr<CanvasContext>>&& aPresentationContexts,
+ RefPtr<CommandEncoder>&& aEncoder)
+ : ChildOf(aParent),
+ mId(aId),
+ mPresentationContexts(std::move(aPresentationContexts)) {
mEncoder = std::move(aEncoder);
MOZ_RELEASE_ASSERT(aId);
}
@@ -33,9 +36,9 @@ Maybe<RawId> CommandBuffer::Commit() {
return Nothing();
}
mValid = false;
- for (const auto& targetContext : mTargetContexts) {
- if (targetContext) {
- targetContext->MaybeQueueSwapChainPresent();
+ for (const auto& presentationContext : mPresentationContexts) {
+ if (presentationContext) {
+ presentationContext->MaybeQueueSwapChainPresent();
}
}
return Some(mId);
diff --git a/dom/webgpu/CommandBuffer.h b/dom/webgpu/CommandBuffer.h
index b9c2495fb7..dff3e14d75 100644
--- a/dom/webgpu/CommandBuffer.h
+++ b/dom/webgpu/CommandBuffer.h
@@ -22,7 +22,7 @@ class CommandBuffer final : public ObjectBase, public ChildOf<Device> {
GPU_DECL_JS_WRAP(CommandBuffer)
CommandBuffer(Device* const aParent, RawId aId,
- nsTArray<WeakPtr<CanvasContext>>&& aTargetContexts,
+ nsTArray<WeakPtr<CanvasContext>>&& aPresentationContexts,
RefPtr<CommandEncoder>&& aEncoder);
Maybe<RawId> Commit();
@@ -33,7 +33,7 @@ class CommandBuffer final : public ObjectBase, public ChildOf<Device> {
void Cleanup();
const RawId mId;
- const nsTArray<WeakPtr<CanvasContext>> mTargetContexts;
+ const nsTArray<WeakPtr<CanvasContext>> mPresentationContexts;
// Command buffers and encoders share the same identity (this is a
// simplification currently made by wgpu). To avoid dropping the same ID twice,
// the wgpu resource lifetime is tied to the encoder which is held alive by
diff --git a/dom/webgpu/CommandEncoder.cpp b/dom/webgpu/CommandEncoder.cpp
index 15d95401d4..f254c9d8b9 100644
--- a/dom/webgpu/CommandEncoder.cpp
+++ b/dom/webgpu/CommandEncoder.cpp
@@ -3,6 +3,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#include "mozilla/dom/UnionTypes.h"
#include "mozilla/dom/WebGPUBinding.h"
#include "CommandEncoder.h"
@@ -89,6 +90,14 @@ void CommandEncoder::Cleanup() {
}
}
+void CommandEncoder::TrackPresentationContext(CanvasContext* aTargetContext) {
+ if (aTargetContext) {
+ if (!aTargetContext->IsOffscreenCanvas()) {
+ mPresentationContexts.AppendElement(aTargetContext);
+ }
+ }
+}
+
void CommandEncoder::CopyBufferToBuffer(const Buffer& aSource,
BufferAddress aSourceOffset,
const Buffer& aDestination,
@@ -121,10 +130,7 @@ void CommandEncoder::CopyBufferToTexture(
ConvertExtent(aCopySize), ToFFI(&bb));
mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
- const auto& targetContext = aDestination.mTexture->mTargetContext;
- if (targetContext) {
- mTargetContexts.AppendElement(targetContext);
- }
+ TrackPresentationContext(aDestination.mTexture->mTargetContext);
}
void CommandEncoder::CopyTextureToBuffer(
const dom::GPUImageCopyTexture& aSource,
@@ -156,10 +162,7 @@ void CommandEncoder::CopyTextureToTexture(
ConvertExtent(aCopySize), ToFFI(&bb));
mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
- const auto& targetContext = aDestination.mTexture->mTargetContext;
- if (targetContext) {
- mTargetContexts.AppendElement(targetContext);
- }
+ TrackPresentationContext(aDestination.mTexture->mTargetContext);
}
void CommandEncoder::ClearBuffer(const Buffer& aBuffer, const uint64_t aOffset,
@@ -216,13 +219,9 @@ already_AddRefed<ComputePassEncoder> CommandEncoder::BeginComputePass(
already_AddRefed<RenderPassEncoder> CommandEncoder::BeginRenderPass(
const dom::GPURenderPassDescriptor& aDesc) {
for (const auto& at : aDesc.mColorAttachments) {
- auto* targetContext = at.mView->GetTargetContext();
- if (targetContext) {
- mTargetContexts.AppendElement(targetContext);
- }
+ TrackPresentationContext(at.mView->GetTargetContext());
if (at.mResolveTarget.WasPassed()) {
- targetContext = at.mResolveTarget.Value().GetTargetContext();
- mTargetContexts.AppendElement(targetContext);
+ TrackPresentationContext(at.mResolveTarget.Value().GetTargetContext());
}
}
@@ -230,24 +229,24 @@ already_AddRefed<RenderPassEncoder> CommandEncoder::BeginRenderPass(
return pass.forget();
}
-void CommandEncoder::EndComputePass(ffi::WGPUComputePass& aPass) {
+void CommandEncoder::EndComputePass(ffi::WGPURecordedComputePass& aPass) {
if (!mBridge->IsOpen()) {
return;
}
ipc::ByteBuf byteBuf;
ffi::wgpu_compute_pass_finish(&aPass, ToFFI(&byteBuf));
- mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(byteBuf));
+ mBridge->SendComputePass(mId, mParent->mId, std::move(byteBuf));
}
-void CommandEncoder::EndRenderPass(ffi::WGPURenderPass& aPass) {
+void CommandEncoder::EndRenderPass(ffi::WGPURecordedRenderPass& aPass) {
if (!mBridge->IsOpen()) {
return;
}
ipc::ByteBuf byteBuf;
ffi::wgpu_render_pass_finish(&aPass, ToFFI(&byteBuf));
- mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(byteBuf));
+ mBridge->SendRenderPass(mId, mParent->mId, std::move(byteBuf));
}
already_AddRefed<CommandBuffer> CommandEncoder::Finish(
@@ -263,7 +262,7 @@ already_AddRefed<CommandBuffer> CommandEncoder::Finish(
RefPtr<CommandEncoder> me(this);
RefPtr<CommandBuffer> comb = new CommandBuffer(
- mParent, mId, std::move(mTargetContexts), std::move(me));
+ mParent, mId, std::move(mPresentationContexts), std::move(me));
return comb.forget();
}
diff --git a/dom/webgpu/CommandEncoder.h b/dom/webgpu/CommandEncoder.h
index 52b10a5b2e..99a2fb15b4 100644
--- a/dom/webgpu/CommandEncoder.h
+++ b/dom/webgpu/CommandEncoder.h
@@ -32,7 +32,7 @@ using GPUExtent3D = RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict;
namespace webgpu {
namespace ffi {
struct WGPUComputePass;
-struct WGPURenderPass;
+struct WGPURecordedRenderPass;
struct WGPUImageDataLayout;
struct WGPUImageCopyTexture_TextureId;
struct WGPUExtent3d;
@@ -67,13 +67,15 @@ class CommandEncoder final : public ObjectBase, public ChildOf<Device> {
void Cleanup();
RefPtr<WebGPUChild> mBridge;
- nsTArray<WeakPtr<CanvasContext>> mTargetContexts;
+ nsTArray<WeakPtr<CanvasContext>> mPresentationContexts;
+
+ void TrackPresentationContext(CanvasContext* aTargetContext);
public:
const auto& GetDevice() const { return mParent; };
- void EndComputePass(ffi::WGPUComputePass& aPass);
- void EndRenderPass(ffi::WGPURenderPass& aPass);
+ void EndComputePass(ffi::WGPURecordedComputePass& aPass);
+ void EndRenderPass(ffi::WGPURecordedRenderPass& aPass);
void CopyBufferToBuffer(const Buffer& aSource, BufferAddress aSourceOffset,
const Buffer& aDestination,
diff --git a/dom/webgpu/ComputePassEncoder.cpp b/dom/webgpu/ComputePassEncoder.cpp
index 2820a575e8..190bbf00a7 100644
--- a/dom/webgpu/ComputePassEncoder.cpp
+++ b/dom/webgpu/ComputePassEncoder.cpp
@@ -17,13 +17,13 @@ GPU_IMPL_CYCLE_COLLECTION(ComputePassEncoder, mParent, mUsedBindGroups,
mUsedPipelines)
GPU_IMPL_JS_WRAP(ComputePassEncoder)
-void ffiWGPUComputePassDeleter::operator()(ffi::WGPUComputePass* raw) {
+void ffiWGPUComputePassDeleter::operator()(ffi::WGPURecordedComputePass* raw) {
if (raw) {
ffi::wgpu_compute_pass_destroy(raw);
}
}
-ffi::WGPUComputePass* BeginComputePass(
+ffi::WGPURecordedComputePass* BeginComputePass(
RawId aEncoderId, const dom::GPUComputePassDescriptor& aDesc) {
MOZ_RELEASE_ASSERT(aEncoderId);
ffi::WGPUComputePassDescriptor desc = {};
@@ -31,7 +31,7 @@ ffi::WGPUComputePass* BeginComputePass(
webgpu::StringHelper label(aDesc.mLabel);
desc.label = label.Get();
- return ffi::wgpu_command_encoder_begin_compute_pass(aEncoderId, &desc);
+ return ffi::wgpu_command_encoder_begin_compute_pass(&desc);
}
ComputePassEncoder::ComputePassEncoder(
@@ -49,16 +49,16 @@ void ComputePassEncoder::SetBindGroup(
const dom::Sequence<uint32_t>& aDynamicOffsets) {
if (mValid) {
mUsedBindGroups.AppendElement(&aBindGroup);
- ffi::wgpu_compute_pass_set_bind_group(mPass.get(), aSlot, aBindGroup.mId,
- aDynamicOffsets.Elements(),
- aDynamicOffsets.Length());
+ ffi::wgpu_recorded_compute_pass_set_bind_group(
+ mPass.get(), aSlot, aBindGroup.mId, aDynamicOffsets.Elements(),
+ aDynamicOffsets.Length());
}
}
void ComputePassEncoder::SetPipeline(const ComputePipeline& aPipeline) {
if (mValid) {
mUsedPipelines.AppendElement(&aPipeline);
- ffi::wgpu_compute_pass_set_pipeline(mPass.get(), aPipeline.mId);
+ ffi::wgpu_recorded_compute_pass_set_pipeline(mPass.get(), aPipeline.mId);
}
}
@@ -66,7 +66,7 @@ void ComputePassEncoder::DispatchWorkgroups(uint32_t workgroupCountX,
uint32_t workgroupCountY,
uint32_t workgroupCountZ) {
if (mValid) {
- ffi::wgpu_compute_pass_dispatch_workgroups(
+ ffi::wgpu_recorded_compute_pass_dispatch_workgroups(
mPass.get(), workgroupCountX, workgroupCountY, workgroupCountZ);
}
}
@@ -74,7 +74,7 @@ void ComputePassEncoder::DispatchWorkgroups(uint32_t workgroupCountX,
void ComputePassEncoder::DispatchWorkgroupsIndirect(
const Buffer& aIndirectBuffer, uint64_t aIndirectOffset) {
if (mValid) {
- ffi::wgpu_compute_pass_dispatch_workgroups_indirect(
+ ffi::wgpu_recorded_compute_pass_dispatch_workgroups_indirect(
mPass.get(), aIndirectBuffer.mId, aIndirectOffset);
}
}
@@ -82,18 +82,20 @@ void ComputePassEncoder::DispatchWorkgroupsIndirect(
void ComputePassEncoder::PushDebugGroup(const nsAString& aString) {
if (mValid) {
const NS_ConvertUTF16toUTF8 utf8(aString);
- ffi::wgpu_compute_pass_push_debug_group(mPass.get(), utf8.get(), 0);
+ ffi::wgpu_recorded_compute_pass_push_debug_group(mPass.get(), utf8.get(),
+ 0);
}
}
void ComputePassEncoder::PopDebugGroup() {
if (mValid) {
- ffi::wgpu_compute_pass_pop_debug_group(mPass.get());
+ ffi::wgpu_recorded_compute_pass_pop_debug_group(mPass.get());
}
}
void ComputePassEncoder::InsertDebugMarker(const nsAString& aString) {
if (mValid) {
const NS_ConvertUTF16toUTF8 utf8(aString);
- ffi::wgpu_compute_pass_insert_debug_marker(mPass.get(), utf8.get(), 0);
+ ffi::wgpu_recorded_compute_pass_insert_debug_marker(mPass.get(), utf8.get(),
+ 0);
}
}
diff --git a/dom/webgpu/ComputePassEncoder.h b/dom/webgpu/ComputePassEncoder.h
index 8160a09e2e..2455822f79 100644
--- a/dom/webgpu/ComputePassEncoder.h
+++ b/dom/webgpu/ComputePassEncoder.h
@@ -18,7 +18,7 @@ struct GPUComputePassDescriptor;
namespace webgpu {
namespace ffi {
-struct WGPUComputePass;
+struct WGPURecordedComputePass;
} // namespace ffi
class BindGroup;
@@ -27,7 +27,7 @@ class CommandEncoder;
class ComputePipeline;
struct ffiWGPUComputePassDeleter {
- void operator()(ffi::WGPUComputePass*);
+ void operator()(ffi::WGPURecordedComputePass*);
};
class ComputePassEncoder final : public ObjectBase,
@@ -43,7 +43,8 @@ class ComputePassEncoder final : public ObjectBase,
virtual ~ComputePassEncoder();
void Cleanup() {}
- std::unique_ptr<ffi::WGPUComputePass, ffiWGPUComputePassDeleter> mPass;
+ std::unique_ptr<ffi::WGPURecordedComputePass, ffiWGPUComputePassDeleter>
+ mPass;
// keep all the used objects alive while the pass is recorded
nsTArray<RefPtr<const BindGroup>> mUsedBindGroups;
nsTArray<RefPtr<const ComputePipeline>> mUsedPipelines;
diff --git a/dom/webgpu/Device.cpp b/dom/webgpu/Device.cpp
index a659047af1..a9fd5ee44c 100644
--- a/dom/webgpu/Device.cpp
+++ b/dom/webgpu/Device.cpp
@@ -247,6 +247,7 @@ already_AddRefed<Sampler> Device::CreateSampler(
desc.mipmap_filter = ffi::WGPUFilterMode(aDesc.mMipmapFilter);
desc.lod_min_clamp = aDesc.mLodMinClamp;
desc.lod_max_clamp = aDesc.mLodMaxClamp;
+ desc.max_anisotropy = aDesc.mMaxAnisotropy;
ffi::WGPUCompareFunction comparison = ffi::WGPUCompareFunction_Sentinel;
if (aDesc.mCompare.WasPassed()) {
@@ -320,8 +321,6 @@ already_AddRefed<BindGroupLayout> Device::CreateBindGroupLayout(
case dom::GPUTextureSampleType::Depth:
data.type = ffi::WGPURawTextureSampleType_Depth;
break;
- case dom::GPUTextureSampleType::EndGuard_:
- MOZ_ASSERT_UNREACHABLE();
}
}
if (entry.mStorageTexture.WasPassed()) {
@@ -349,8 +348,6 @@ already_AddRefed<BindGroupLayout> Device::CreateBindGroupLayout(
case dom::GPUBufferBindingType::Read_only_storage:
e.ty = ffi::WGPURawBindingType_ReadonlyStorageBuffer;
break;
- case dom::GPUBufferBindingType::EndGuard_:
- MOZ_ASSERT_UNREACHABLE();
}
e.has_dynamic_offset = entry.mBuffer.Value().mHasDynamicOffset;
}
@@ -361,10 +358,23 @@ already_AddRefed<BindGroupLayout> Device::CreateBindGroupLayout(
e.multisampled = entry.mTexture.Value().mMultisampled;
}
if (entry.mStorageTexture.WasPassed()) {
- e.ty = entry.mStorageTexture.Value().mAccess ==
- dom::GPUStorageTextureAccess::Write_only
- ? ffi::WGPURawBindingType_WriteonlyStorageTexture
- : ffi::WGPURawBindingType_ReadonlyStorageTexture;
+ switch (entry.mStorageTexture.Value().mAccess) {
+ case dom::GPUStorageTextureAccess::Write_only: {
+ e.ty = ffi::WGPURawBindingType_WriteonlyStorageTexture;
+ break;
+ }
+ case dom::GPUStorageTextureAccess::Read_only: {
+ e.ty = ffi::WGPURawBindingType_ReadonlyStorageTexture;
+ break;
+ }
+ case dom::GPUStorageTextureAccess::Read_write: {
+ e.ty = ffi::WGPURawBindingType_ReadWriteStorageTexture;
+ break;
+ }
+ default: {
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ }
e.view_dimension = &optional[i].dim;
e.storage_texture_format = &optional[i].format;
}
@@ -379,8 +389,6 @@ already_AddRefed<BindGroupLayout> Device::CreateBindGroupLayout(
case dom::GPUSamplerBindingType::Comparison:
e.sampler_compare = true;
break;
- case dom::GPUSamplerBindingType::EndGuard_:
- MOZ_ASSERT_UNREACHABLE();
}
}
entries.AppendElement(e);
@@ -671,8 +679,12 @@ RawId CreateComputePipelineImpl(PipelineCreationContext* const aContext,
MOZ_ASSERT_UNREACHABLE();
}
desc.stage.module = aDesc.mCompute.mModule->mId;
- CopyUTF16toUTF8(aDesc.mCompute.mEntryPoint, entryPoint);
- desc.stage.entry_point = entryPoint.get();
+ if (aDesc.mCompute.mEntryPoint.WasPassed()) {
+ CopyUTF16toUTF8(aDesc.mCompute.mEntryPoint.Value(), entryPoint);
+ desc.stage.entry_point = entryPoint.get();
+ } else {
+ desc.stage.entry_point = nullptr;
+ }
RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {};
RawId id = ffi::wgpu_client_create_compute_pipeline(
@@ -717,8 +729,12 @@ RawId CreateRenderPipelineImpl(PipelineCreationContext* const aContext,
{
const auto& stage = aDesc.mVertex;
vertexState.stage.module = stage.mModule->mId;
- CopyUTF16toUTF8(stage.mEntryPoint, vsEntry);
- vertexState.stage.entry_point = vsEntry.get();
+ if (stage.mEntryPoint.WasPassed()) {
+ CopyUTF16toUTF8(stage.mEntryPoint.Value(), vsEntry);
+ vertexState.stage.entry_point = vsEntry.get();
+ } else {
+ vertexState.stage.entry_point = nullptr;
+ }
for (const auto& vertex_desc : stage.mBuffers) {
ffi::WGPUVertexBufferLayout vb_desc = {};
@@ -753,8 +769,12 @@ RawId CreateRenderPipelineImpl(PipelineCreationContext* const aContext,
if (aDesc.mFragment.WasPassed()) {
const auto& stage = aDesc.mFragment.Value();
fragmentState.stage.module = stage.mModule->mId;
- CopyUTF16toUTF8(stage.mEntryPoint, fsEntry);
- fragmentState.stage.entry_point = fsEntry.get();
+ if (stage.mEntryPoint.WasPassed()) {
+ CopyUTF16toUTF8(stage.mEntryPoint.Value(), fsEntry);
+ fragmentState.stage.entry_point = fsEntry.get();
+ } else {
+ fragmentState.stage.entry_point = nullptr;
+ }
// Note: we pre-collect the blend states into a different array
// so that we can have non-stale pointers into it.
diff --git a/dom/webgpu/Queue.cpp b/dom/webgpu/Queue.cpp
index 26952ee173..ca25b2f290 100644
--- a/dom/webgpu/Queue.cpp
+++ b/dom/webgpu/Queue.cpp
@@ -69,18 +69,36 @@ void Queue::WriteBuffer(const Buffer& aBuffer, uint64_t aBufferOffset,
return;
}
- dom::ProcessTypedArraysFixed(aData, [&](const Span<const uint8_t>& aData) {
- uint64_t length = aData.Length();
- const auto checkedSize = aSize.WasPassed()
- ? CheckedInt<size_t>(aSize.Value())
- : CheckedInt<size_t>(length) - aDataOffset;
- if (!checkedSize.isValid()) {
+ size_t elementByteSize = 1;
+ if (aData.IsArrayBufferView()) {
+ auto type = aData.GetAsArrayBufferView().Type();
+ if (type != JS::Scalar::MaxTypedArrayViewType) {
+ elementByteSize = byteSize(type);
+ }
+ }
+ dom::ProcessTypedArraysFixed(aData, [&, elementByteSize](
+ const Span<const uint8_t>& aData) {
+ uint64_t byteLength = aData.Length();
+
+ auto checkedByteOffset =
+ CheckedInt<uint64_t>(aDataOffset) * elementByteSize;
+ if (!checkedByteOffset.isValid()) {
+ aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
+ return;
+ }
+ auto offset = checkedByteOffset.value();
+
+ const auto checkedByteSize =
+ aSize.WasPassed() ? CheckedInt<size_t>(aSize.Value()) * elementByteSize
+ : CheckedInt<size_t>(byteLength) - offset;
+ if (!checkedByteSize.isValid()) {
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
return;
}
+ auto size = checkedByteSize.value();
- const auto& size = checkedSize.value();
- if (aDataOffset + size > length) {
+ auto checkedByteEnd = CheckedInt<uint64_t>(offset) + size;
+ if (!checkedByteEnd.isValid() || checkedByteEnd.value() > byteLength) {
aRv.ThrowAbortError(nsPrintfCString("Wrong data size %" PRIuPTR, size));
return;
}
diff --git a/dom/webgpu/RenderPassEncoder.cpp b/dom/webgpu/RenderPassEncoder.cpp
index c5cb19ce15..03c16ea3a4 100644
--- a/dom/webgpu/RenderPassEncoder.cpp
+++ b/dom/webgpu/RenderPassEncoder.cpp
@@ -18,7 +18,7 @@ GPU_IMPL_CYCLE_COLLECTION(RenderPassEncoder, mParent, mUsedBindGroups,
mUsedRenderBundles)
GPU_IMPL_JS_WRAP(RenderPassEncoder)
-void ffiWGPURenderPassDeleter::operator()(ffi::WGPURenderPass* raw) {
+void ffiWGPURenderPassDeleter::operator()(ffi::WGPURecordedRenderPass* raw) {
if (raw) {
ffi::wgpu_render_pass_destroy(raw);
}
@@ -30,8 +30,6 @@ static ffi::WGPULoadOp ConvertLoadOp(const dom::GPULoadOp& aOp) {
return ffi::WGPULoadOp_Load;
case dom::GPULoadOp::Clear:
return ffi::WGPULoadOp_Clear;
- case dom::GPULoadOp::EndGuard_:
- break;
}
MOZ_CRASH("bad GPULoadOp");
}
@@ -42,8 +40,6 @@ static ffi::WGPUStoreOp ConvertStoreOp(const dom::GPUStoreOp& aOp) {
return ffi::WGPUStoreOp_Store;
case dom::GPUStoreOp::Discard:
return ffi::WGPUStoreOp_Discard;
- case dom::GPUStoreOp::EndGuard_:
- break;
}
MOZ_CRASH("bad GPUStoreOp");
}
@@ -87,7 +83,7 @@ static ffi::WGPUColor ConvertColor(
return ffi::WGPUColor();
}
-ffi::WGPURenderPass* BeginRenderPass(
+ffi::WGPURecordedRenderPass* BeginRenderPass(
CommandEncoder* const aParent, const dom::GPURenderPassDescriptor& aDesc) {
ffi::WGPURenderPassDescriptor desc = {};
@@ -155,7 +151,7 @@ ffi::WGPURenderPass* BeginRenderPass(
}
}
- return ffi::wgpu_command_encoder_begin_render_pass(aParent->mId, &desc);
+ return ffi::wgpu_command_encoder_begin_render_pass(&desc);
}
RenderPassEncoder::RenderPassEncoder(CommandEncoder* const aParent,
@@ -186,16 +182,16 @@ void RenderPassEncoder::SetBindGroup(
const dom::Sequence<uint32_t>& aDynamicOffsets) {
if (mValid) {
mUsedBindGroups.AppendElement(&aBindGroup);
- ffi::wgpu_render_pass_set_bind_group(mPass.get(), aSlot, aBindGroup.mId,
- aDynamicOffsets.Elements(),
- aDynamicOffsets.Length());
+ ffi::wgpu_recorded_render_pass_set_bind_group(
+ mPass.get(), aSlot, aBindGroup.mId, aDynamicOffsets.Elements(),
+ aDynamicOffsets.Length());
}
}
void RenderPassEncoder::SetPipeline(const RenderPipeline& aPipeline) {
if (mValid) {
mUsedPipelines.AppendElement(&aPipeline);
- ffi::wgpu_render_pass_set_pipeline(mPass.get(), aPipeline.mId);
+ ffi::wgpu_recorded_render_pass_set_pipeline(mPass.get(), aPipeline.mId);
}
}
@@ -207,8 +203,8 @@ void RenderPassEncoder::SetIndexBuffer(const Buffer& aBuffer,
const auto iformat = aIndexFormat == dom::GPUIndexFormat::Uint32
? ffi::WGPUIndexFormat_Uint32
: ffi::WGPUIndexFormat_Uint16;
- ffi::wgpu_render_pass_set_index_buffer(mPass.get(), aBuffer.mId, iformat,
- aOffset, aSize);
+ ffi::wgpu_recorded_render_pass_set_index_buffer(mPass.get(), aBuffer.mId,
+ iformat, aOffset, aSize);
}
}
@@ -216,16 +212,17 @@ void RenderPassEncoder::SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer,
uint64_t aOffset, uint64_t aSize) {
if (mValid) {
mUsedBuffers.AppendElement(&aBuffer);
- ffi::wgpu_render_pass_set_vertex_buffer(mPass.get(), aSlot, aBuffer.mId,
- aOffset, aSize);
+ ffi::wgpu_recorded_render_pass_set_vertex_buffer(
+ mPass.get(), aSlot, aBuffer.mId, aOffset, aSize);
}
}
void RenderPassEncoder::Draw(uint32_t aVertexCount, uint32_t aInstanceCount,
uint32_t aFirstVertex, uint32_t aFirstInstance) {
if (mValid) {
- ffi::wgpu_render_pass_draw(mPass.get(), aVertexCount, aInstanceCount,
- aFirstVertex, aFirstInstance);
+ ffi::wgpu_recorded_render_pass_draw(mPass.get(), aVertexCount,
+ aInstanceCount, aFirstVertex,
+ aFirstInstance);
}
}
@@ -234,24 +231,24 @@ void RenderPassEncoder::DrawIndexed(uint32_t aIndexCount,
uint32_t aFirstIndex, int32_t aBaseVertex,
uint32_t aFirstInstance) {
if (mValid) {
- ffi::wgpu_render_pass_draw_indexed(mPass.get(), aIndexCount, aInstanceCount,
- aFirstIndex, aBaseVertex,
- aFirstInstance);
+ ffi::wgpu_recorded_render_pass_draw_indexed(mPass.get(), aIndexCount,
+ aInstanceCount, aFirstIndex,
+ aBaseVertex, aFirstInstance);
}
}
void RenderPassEncoder::DrawIndirect(const Buffer& aIndirectBuffer,
uint64_t aIndirectOffset) {
if (mValid) {
- ffi::wgpu_render_pass_draw_indirect(mPass.get(), aIndirectBuffer.mId,
- aIndirectOffset);
+ ffi::wgpu_recorded_render_pass_draw_indirect(
+ mPass.get(), aIndirectBuffer.mId, aIndirectOffset);
}
}
void RenderPassEncoder::DrawIndexedIndirect(const Buffer& aIndirectBuffer,
uint64_t aIndirectOffset) {
if (mValid) {
- ffi::wgpu_render_pass_draw_indexed_indirect(
+ ffi::wgpu_recorded_render_pass_draw_indexed_indirect(
mPass.get(), aIndirectBuffer.mId, aIndirectOffset);
}
}
@@ -259,15 +256,16 @@ void RenderPassEncoder::DrawIndexedIndirect(const Buffer& aIndirectBuffer,
void RenderPassEncoder::SetViewport(float x, float y, float width, float height,
float minDepth, float maxDepth) {
if (mValid) {
- ffi::wgpu_render_pass_set_viewport(mPass.get(), x, y, width, height,
- minDepth, maxDepth);
+ ffi::wgpu_recorded_render_pass_set_viewport(mPass.get(), x, y, width,
+ height, minDepth, maxDepth);
}
}
void RenderPassEncoder::SetScissorRect(uint32_t x, uint32_t y, uint32_t width,
uint32_t height) {
if (mValid) {
- ffi::wgpu_render_pass_set_scissor_rect(mPass.get(), x, y, width, height);
+ ffi::wgpu_recorded_render_pass_set_scissor_rect(mPass.get(), x, y, width,
+ height);
}
}
@@ -275,13 +273,14 @@ void RenderPassEncoder::SetBlendConstant(
const dom::DoubleSequenceOrGPUColorDict& color) {
if (mValid) {
ffi::WGPUColor aColor = ConvertColor(color);
- ffi::wgpu_render_pass_set_blend_constant(mPass.get(), &aColor);
+ ffi::wgpu_recorded_render_pass_set_blend_constant(mPass.get(), &aColor);
}
}
void RenderPassEncoder::SetStencilReference(uint32_t reference) {
if (mValid) {
- ffi::wgpu_render_pass_set_stencil_reference(mPass.get(), reference);
+ ffi::wgpu_recorded_render_pass_set_stencil_reference(mPass.get(),
+ reference);
}
}
@@ -293,26 +292,27 @@ void RenderPassEncoder::ExecuteBundles(
mUsedRenderBundles.AppendElement(bundle);
renderBundles.AppendElement(bundle->mId);
}
- ffi::wgpu_render_pass_execute_bundles(mPass.get(), renderBundles.Elements(),
- renderBundles.Length());
+ ffi::wgpu_recorded_render_pass_execute_bundles(
+ mPass.get(), renderBundles.Elements(), renderBundles.Length());
}
}
void RenderPassEncoder::PushDebugGroup(const nsAString& aString) {
if (mValid) {
const NS_ConvertUTF16toUTF8 utf8(aString);
- ffi::wgpu_render_pass_push_debug_group(mPass.get(), utf8.get(), 0);
+ ffi::wgpu_recorded_render_pass_push_debug_group(mPass.get(), utf8.get(), 0);
}
}
void RenderPassEncoder::PopDebugGroup() {
if (mValid) {
- ffi::wgpu_render_pass_pop_debug_group(mPass.get());
+ ffi::wgpu_recorded_render_pass_pop_debug_group(mPass.get());
}
}
void RenderPassEncoder::InsertDebugMarker(const nsAString& aString) {
if (mValid) {
const NS_ConvertUTF16toUTF8 utf8(aString);
- ffi::wgpu_render_pass_insert_debug_marker(mPass.get(), utf8.get(), 0);
+ ffi::wgpu_recorded_render_pass_insert_debug_marker(mPass.get(), utf8.get(),
+ 0);
}
}
diff --git a/dom/webgpu/RenderPassEncoder.h b/dom/webgpu/RenderPassEncoder.h
index 5ca414b4ea..b6008bd013 100644
--- a/dom/webgpu/RenderPassEncoder.h
+++ b/dom/webgpu/RenderPassEncoder.h
@@ -24,7 +24,7 @@ class AutoSequence;
} // namespace dom
namespace webgpu {
namespace ffi {
-struct WGPURenderPass;
+struct WGPURecordedRenderPass;
} // namespace ffi
class BindGroup;
@@ -35,7 +35,7 @@ class RenderPipeline;
class TextureView;
struct ffiWGPURenderPassDeleter {
- void operator()(ffi::WGPURenderPass*);
+ void operator()(ffi::WGPURecordedRenderPass*);
};
class RenderPassEncoder final : public ObjectBase,
@@ -51,7 +51,7 @@ class RenderPassEncoder final : public ObjectBase,
virtual ~RenderPassEncoder();
void Cleanup() {}
- std::unique_ptr<ffi::WGPURenderPass, ffiWGPURenderPassDeleter> mPass;
+ std::unique_ptr<ffi::WGPURecordedRenderPass, ffiWGPURenderPassDeleter> mPass;
// keep all the used objects alive while the pass is recorded
nsTArray<RefPtr<const BindGroup>> mUsedBindGroups;
nsTArray<RefPtr<const Buffer>> mUsedBuffers;
diff --git a/dom/webgpu/SupportedFeatures.cpp b/dom/webgpu/SupportedFeatures.cpp
index 294524bc81..a32879a2b0 100644
--- a/dom/webgpu/SupportedFeatures.cpp
+++ b/dom/webgpu/SupportedFeatures.cpp
@@ -5,6 +5,7 @@
#include "SupportedFeatures.h"
#include "Adapter.h"
+#include "mozilla/dom/BindingUtils.h"
#include "mozilla/dom/WebGPUBinding.h"
namespace mozilla::webgpu {
@@ -17,7 +18,7 @@ SupportedFeatures::SupportedFeatures(Adapter* const aParent)
void SupportedFeatures::Add(const dom::GPUFeatureName aFeature,
ErrorResult& aRv) {
- const auto u8 = dom::GPUFeatureNameValues::GetString(aFeature);
+ const auto u8 = dom::GetEnumString(aFeature);
const auto u16 = NS_ConvertUTF8toUTF16(u8);
dom::GPUSupportedFeatures_Binding::SetlikeHelpers::Add(this, u16, aRv);
diff --git a/dom/webgpu/Utility.cpp b/dom/webgpu/Utility.cpp
index fdb5732e8a..111bac4416 100644
--- a/dom/webgpu/Utility.cpp
+++ b/dom/webgpu/Utility.cpp
@@ -228,8 +228,6 @@ ffi::WGPUTextureFormat ConvertTextureFormat(
case dom::GPUTextureFormat::Depth32float_stencil8:
result.tag = ffi::WGPUTextureFormat_Depth32FloatStencil8;
break;
- case dom::GPUTextureFormat::EndGuard_:
- MOZ_ASSERT_UNREACHABLE();
}
// Clang will check for us that the switch above is exhaustive,
diff --git a/dom/webgpu/ipc/PWebGPU.ipdl b/dom/webgpu/ipc/PWebGPU.ipdl
index 5146dd6826..451480a1c3 100644
--- a/dom/webgpu/ipc/PWebGPU.ipdl
+++ b/dom/webgpu/ipc/PWebGPU.ipdl
@@ -44,6 +44,8 @@ parent:
async DeviceActionWithAck(RawId selfId, ByteBuf buf) returns (bool dummy);
async TextureAction(RawId selfId, RawId aDeviceId, ByteBuf buf);
async CommandEncoderAction(RawId selfId, RawId aDeviceId, ByteBuf buf);
+ async RenderPass(RawId selfId, RawId aDeviceId, ByteBuf buf);
+ async ComputePass(RawId selfId, RawId aDeviceId, ByteBuf buf);
async BumpImplicitBindGroupLayout(RawId pipelineId, bool isCompute, uint32_t index, RawId assignId);
async DeviceCreateBuffer(RawId deviceId, RawId bufferId, GPUBufferDescriptor desc, UnsafeSharedMemoryHandle shm);
diff --git a/dom/webgpu/ipc/WebGPUChild.cpp b/dom/webgpu/ipc/WebGPUChild.cpp
index 663dd5cb89..ab1a100736 100644
--- a/dom/webgpu/ipc/WebGPUChild.cpp
+++ b/dom/webgpu/ipc/WebGPUChild.cpp
@@ -40,10 +40,10 @@ void WebGPUChild::JsWarning(nsIGlobalObject* aGlobal,
if (aGlobal) {
dom::AutoJSAPI api;
if (api.Init(aGlobal)) {
- JS::WarnUTF8(api.cx(), "%s", flatString.get());
+ JS::WarnUTF8(api.cx(), "Uncaptured WebGPU error: %s", flatString.get());
}
} else {
- printf_stderr("Validation error without device target: %s\n",
+ printf_stderr("Uncaptured WebGPU error without device target: %s\n",
flatString.get());
}
}
diff --git a/dom/webgpu/ipc/WebGPUParent.cpp b/dom/webgpu/ipc/WebGPUParent.cpp
index 9b79988245..1c0560d31e 100644
--- a/dom/webgpu/ipc/WebGPUParent.cpp
+++ b/dom/webgpu/ipc/WebGPUParent.cpp
@@ -154,6 +154,12 @@ class ErrorBuffer {
}
return Some(Error{*filterType, false, nsCString{mMessageUtf8}});
}
+
+ void CoerceValidationToInternal() {
+ if (mType == ffi::WGPUErrorBufferType_Validation) {
+ mType = ffi::WGPUErrorBufferType_Internal;
+ }
+ }
};
struct PendingSwapChainDrop {
@@ -180,10 +186,9 @@ class PresentationData {
Maybe<PendingSwapChainDrop> mPendingSwapChainDrop;
const uint32_t mSourcePitch;
- std::vector<RawId> mUnassignedBufferIds MOZ_GUARDED_BY(mBuffersLock);
- std::vector<RawId> mAvailableBufferIds MOZ_GUARDED_BY(mBuffersLock);
- std::vector<RawId> mQueuedBufferIds MOZ_GUARDED_BY(mBuffersLock);
- Mutex mBuffersLock;
+ std::vector<RawId> mUnassignedBufferIds;
+ std::vector<RawId> mAvailableBufferIds;
+ std::vector<RawId> mQueuedBufferIds;
PresentationData(WebGPUParent* aParent, bool aUseExternalTextureInSwapChain,
RawId aDeviceId, RawId aQueueId,
@@ -194,8 +199,7 @@ class PresentationData {
mDeviceId(aDeviceId),
mQueueId(aQueueId),
mDesc(aDesc),
- mSourcePitch(aSourcePitch),
- mBuffersLock("WebGPU presentation buffers") {
+ mSourcePitch(aSourcePitch) {
MOZ_COUNT_CTOR(PresentationData);
for (const RawId id : aBufferIds) {
@@ -354,6 +358,11 @@ ipc::IPCResult WebGPUParent::RecvInstanceRequestAdapter(
nsAutoCString message(aMessage);
req->mParent->LoseDevice(deviceId, reason, message);
+ auto it = req->mParent->mDeviceFenceHandles.find(deviceId);
+ if (it != req->mParent->mDeviceFenceHandles.end()) {
+ req->mParent->mDeviceFenceHandles.erase(it);
+ }
+
// We're no longer tracking the memory for this callback, so erase
// it to ensure we don't leak memory.
req->mParent->mDeviceLostRequests.erase(deviceId);
@@ -394,7 +403,9 @@ ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice(
HANDLE handle =
wgpu_server_get_device_fence_handle(mContext.get(), aDeviceId);
if (handle) {
- mFenceHandle = new gfx::FileHandleWrapper(UniqueFileHandle(handle));
+ RefPtr<gfx::FileHandleWrapper> fenceHandle =
+ new gfx::FileHandleWrapper(UniqueFileHandle(handle));
+ mDeviceFenceHandles.emplace(aDeviceId, std::move(fenceHandle));
}
#endif
@@ -970,20 +981,16 @@ static void ReadbackPresentCallback(ffi::WGPUBufferMapAsyncStatus status,
return;
}
- PresentationData* data = req->mData.get();
+ RefPtr<PresentationData> data = req->mData;
// get the buffer ID
RawId bufferId;
{
- MutexAutoLock lock(data->mBuffersLock);
bufferId = data->mQueuedBufferIds.back();
data->mQueuedBufferIds.pop_back();
}
// Ensure we'll make the bufferId available for reuse
- auto releaseBuffer = MakeScopeExit([data = RefPtr{data}, bufferId] {
- MutexAutoLock lock(data->mBuffersLock);
- data->mAvailableBufferIds.push_back(bufferId);
- });
+ data->mAvailableBufferIds.push_back(bufferId);
MOZ_LOG(sLogger, LogLevel::Info,
("ReadbackPresentCallback for buffer %" PRIu64 " status=%d\n",
@@ -994,15 +1001,16 @@ static void ReadbackPresentCallback(ffi::WGPUBufferMapAsyncStatus status,
ErrorBuffer getRangeError;
const auto mapped = ffi::wgpu_server_buffer_get_mapped_range(
req->mContext, bufferId, 0, bufferSize, getRangeError.ToFFI());
+ getRangeError.CoerceValidationToInternal();
if (req->mData->mParent) {
req->mData->mParent->ForwardError(data->mDeviceId, getRangeError);
- } else if (auto innerError = getRangeError.GetError()) {
- // If an error occured in get_mapped_range, treat it as an internal error
- // and crash. The error handling story for something unexpected happening
- // during the present glue needs to befigured out in a more global way.
+ }
+ if (auto innerError = getRangeError.GetError()) {
MOZ_LOG(sLogger, LogLevel::Info,
- ("WebGPU present: buffer get_mapped_range failed: %s\n",
+ ("WebGPU present: buffer get_mapped_range for internal "
+ "presentation readback failed: %s\n",
innerError->message.get()));
+ return;
}
MOZ_RELEASE_ASSERT(mapped.length >= bufferSize);
@@ -1029,11 +1037,14 @@ static void ReadbackPresentCallback(ffi::WGPUBufferMapAsyncStatus status,
}
ErrorBuffer unmapError;
wgpu_server_buffer_unmap(req->mContext, bufferId, unmapError.ToFFI());
+ unmapError.CoerceValidationToInternal();
if (req->mData->mParent) {
req->mData->mParent->ForwardError(data->mDeviceId, unmapError);
- } else if (auto innerError = unmapError.GetError()) {
+ }
+ if (auto innerError = unmapError.GetError()) {
MOZ_LOG(sLogger, LogLevel::Info,
- ("WebGPU present: buffer unmap failed: %s\n",
+ ("WebGPU present: buffer unmap for internal presentation "
+ "readback failed: %s\n",
innerError->message.get()));
}
} else {
@@ -1083,9 +1094,12 @@ void WebGPUParent::PostExternalTexture(
const auto index = aExternalTexture->GetSubmissionIndex();
MOZ_ASSERT(index != 0);
+ RefPtr<PresentationData> data = lookup->second.get();
+
Maybe<gfx::FenceInfo> fenceInfo;
- if (mFenceHandle) {
- fenceInfo = Some(gfx::FenceInfo(mFenceHandle, index));
+ auto it = mDeviceFenceHandles.find(data->mDeviceId);
+ if (it != mDeviceFenceHandles.end()) {
+ fenceInfo = Some(gfx::FenceInfo(it->second, index));
}
Maybe<layers::SurfaceDescriptor> desc =
@@ -1098,8 +1112,6 @@ void WebGPUParent::PostExternalTexture(
mRemoteTextureOwner->PushTexture(aRemoteTextureId, aOwnerId, aExternalTexture,
size, surfaceFormat, *desc);
- RefPtr<PresentationData> data = lookup->second.get();
-
auto recycledTexture = mRemoteTextureOwner->GetRecycledExternalTexture(
size, surfaceFormat, desc->type(), aOwnerId);
if (recycledTexture) {
@@ -1140,7 +1152,6 @@ ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
// step 1: find an available staging buffer, or create one
{
- MutexAutoLock lock(data->mBuffersLock);
if (!data->mAvailableBufferIds.empty()) {
bufferId = data->mAvailableBufferIds.back();
data->mAvailableBufferIds.pop_back();
@@ -1285,7 +1296,6 @@ ipc::IPCResult WebGPUParent::RecvSwapChainDrop(
mPresentationDataMap.erase(lookup);
- MutexAutoLock lock(data->mBuffersLock);
ipc::ByteBuf dropByteBuf;
for (const auto bid : data->mUnassignedBufferIds) {
wgpu_server_buffer_free(bid, ToFFI(&dropByteBuf));
@@ -1351,6 +1361,24 @@ ipc::IPCResult WebGPUParent::RecvCommandEncoderAction(
return IPC_OK();
}
+ipc::IPCResult WebGPUParent::RecvRenderPass(RawId aEncoderId, RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf) {
+ ErrorBuffer error;
+ ffi::wgpu_server_render_pass(mContext.get(), aEncoderId, ToFFI(&aByteBuf),
+ error.ToFFI());
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvComputePass(RawId aEncoderId, RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf) {
+ ErrorBuffer error;
+ ffi::wgpu_server_compute_pass(mContext.get(), aEncoderId, ToFFI(&aByteBuf),
+ error.ToFFI());
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
ipc::IPCResult WebGPUParent::RecvBumpImplicitBindGroupLayout(RawId aPipelineId,
bool aIsCompute,
uint32_t aIndex,
@@ -1426,8 +1454,6 @@ ipc::IPCResult WebGPUParent::RecvDevicePopErrorScope(
case dom::GPUErrorFilter::Internal:
ret.resultType = PopErrorScopeResultType::InternalError;
break;
- case dom::GPUErrorFilter::EndGuard_:
- MOZ_CRASH("Bad GPUErrorFilter");
}
}
return ret;
diff --git a/dom/webgpu/ipc/WebGPUParent.h b/dom/webgpu/ipc/WebGPUParent.h
index 6ad539c21e..a1eb36d723 100644
--- a/dom/webgpu/ipc/WebGPUParent.h
+++ b/dom/webgpu/ipc/WebGPUParent.h
@@ -118,6 +118,10 @@ class WebGPUParent final : public PWebGPUParent, public SupportsWeakPtr {
const ipc::ByteBuf& aByteBuf);
ipc::IPCResult RecvCommandEncoderAction(RawId aEncoderId, RawId aDeviceId,
const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvRenderPass(RawId aEncoderId, RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvComputePass(RawId aEncoderId, RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf);
ipc::IPCResult RecvBumpImplicitBindGroupLayout(RawId aPipelineId,
bool aIsCompute,
uint32_t aIndex,
@@ -219,7 +223,7 @@ class WebGPUParent final : public PWebGPUParent, public SupportsWeakPtr {
nsTHashSet<RawId> mLostDeviceIds;
// Shared handle of wgpu device's fence.
- RefPtr<gfx::FileHandleWrapper> mFenceHandle;
+ std::unordered_map<RawId, RefPtr<gfx::FileHandleWrapper>> mDeviceFenceHandles;
// Store DeviceLostRequest structs for each device as unique_ptrs mapped
// to their device ids. We keep these unique_ptrs alive as long as the
diff --git a/dom/webgpu/ipc/WebGPUSerialize.h b/dom/webgpu/ipc/WebGPUSerialize.h
index 8d78d784cb..03f9ee1676 100644
--- a/dom/webgpu/ipc/WebGPUSerialize.h
+++ b/dom/webgpu/ipc/WebGPUSerialize.h
@@ -9,6 +9,7 @@
#include "WebGPUTypes.h"
#include "ipc/EnumSerializer.h"
#include "ipc/IPCMessageUtils.h"
+#include "mozilla/dom/BindingIPCUtils.h"
#include "mozilla/dom/WebGPUBinding.h"
#include "mozilla/webgpu/ffi/wgpu.h"
@@ -20,7 +21,9 @@ namespace IPC {
: public ContiguousEnumSerializer<something, something(0), guard> {}
#define DEFINE_IPC_SERIALIZER_DOM_ENUM(something) \
- DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something::EndGuard_)
+ template <> \
+ struct ParamTraits<something> \
+ : public mozilla::dom::WebIDLEnumSerializer<something> {}
#define DEFINE_IPC_SERIALIZER_FFI_ENUM(something) \
DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something##_Sentinel)